/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Huang Rui
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "amdgpu_xgmi.h"
#include "soc15_common.h"
#include "psp_v3_1.h"
#include "psp_v10_0.h"
#include "psp_v11_0.h"
#include "psp_v11_0_8.h"
#include "psp_v12_0.h"
#include "psp_v13_0.h"
#include "psp_v13_0_4.h"
#include "psp_v14_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_securedisplay.h"
#include "amdgpu_atomfirmware.h"

#define AMD_VBIOS_FILE_MAX_SIZE_B	(1024*1024*3)

static int psp_load_smu_fw(struct psp_context *psp);
static int psp_rap_terminate(struct psp_context *psp);
static int psp_securedisplay_terminate(struct psp_context *psp);

static int psp_ring_init(struct psp_context *psp,
			 enum psp_ring_type ring_type)
{
	int ret = 0;
	struct psp_ring *ring;
	struct amdgpu_device *adev = psp->adev;

	ring = &psp->km_ring;

	ring->ring_type = ring_type;

	/* allocate 4k Page of Local Frame Buffer memory for ring */
	ring->ring_size = 0x1000;
	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->firmware.rbuf,
				      &ring->ring_mem_mc_addr,
				      (void **)&ring->ring_mem);
	if (ret) {
		ring->ring_size = 0;
		return ret;
	}

	return 0;
}

/*
 * Due to DF Cstate management centralized to PMFW, the firmware
 * loading sequence will be updated as below:
 *   - Load KDB
 *   - Load SYS_DRV
 *   - Load tOS
 *   - Load PMFW
 *   - Setup TMR
 *   - Load other non-psp fw
 *   - Load ASD
 *   - Load XGMI/RAS/HDCP/DTM TA if any
 *
 * This new sequence is required for
 *   - Arcturus and onwards
 */
static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;

	if (amdgpu_sriov_vf(adev)) {
		psp->pmfw_centralized_cstate_management = false;
		return;
	}

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 7):
		psp->pmfw_centralized_cstate_management = true;
		break;
	default:
		psp->pmfw_centralized_cstate_management = false;
		break;
	}
}
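
/*
 * SR-IOV path of microcode init: only the optional CAP and TA microcode are
 * fetched here, and adev->virt.autoload_ucode_id is selected per MP0 version.
 */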
static int psp_init_sriov_microcode(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;
	char ucode_prefix[30];
	int ret = 0;

	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 9):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 2):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		ret &= psp_init_ta_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 0):
		adev->virt.autoload_ucode_id = 0;
		break;
	case IP_VERSION(13, 0, 6):
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		ret &= psp_init_ta_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 10):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

static int psp_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;

	psp->autoload_supported = true;
	psp->boot_time_tmr = true;

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
		psp_v3_1_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		psp_v10_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
		psp_v11_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 7):
		adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
		fallthrough;
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		psp_v11_0_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		psp_v12_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(13, 0, 2):
		psp->boot_time_tmr = false;
		fallthrough;
	case IP_VERSION(13, 0, 6):
		psp_v13_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
		psp_v13_0_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 8):
		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
			psp_v11_0_8_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 10):
		psp_v13_0_set_psp_funcs(psp);
		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(13, 0, 4):
		psp_v13_0_4_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
		psp_v14_0_set_psp_funcs(psp);
		break;
	default:
		return -EINVAL;
	}

	psp->adev = adev;

	adev->psp_timeout = 20000;

	psp_check_pmfw_centralized_cstate_management(psp);

	if (amdgpu_sriov_vf(adev))
		return psp_init_sriov_microcode(psp);
	else
		return psp_init_microcode(psp);
}

void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
{
	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
			      &mem_ctx->shared_buf);
	mem_ctx->shared_bo = NULL;
}

static void psp_free_shared_bufs(struct psp_context *psp)
{
	void *tmr_buf;
	void **pptr;

	/* free TMR memory buffer */
	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
	psp->tmr_bo = NULL;

	/* free xgmi shared memory */
	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);

	/* free ras shared memory */
	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);

	/* free hdcp shared memory */
	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);

	/* free dtm shared memory */
	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);

	/* free rap shared memory */
	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);

	/* free securedisplay shared memory */
	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
}

static void psp_memory_training_fini(struct psp_context *psp)
{
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	kfree(ctx->sys_cache);
	ctx->sys_cache = NULL;
}

static int psp_memory_training_init(struct psp_context *psp)
{
	int ret;
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
		dev_dbg(psp->adev->dev, "memory training is not supported!\n");
		return 0;
	}

	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
	if (ctx->sys_cache == NULL) {
		dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
		ret = -ENOMEM;
		goto Err_out;
	}

	dev_dbg(psp->adev->dev,
		"train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
		ctx->train_data_size,
		ctx->p2c_train_data_offset,
		ctx->c2p_train_data_offset);
	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
	return 0;

Err_out:
	psp_memory_training_fini(psp);
	return ret;
}
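
/*
 * The PSP runtime database, as read below, sits at the top of VRAM: a data
 * header at (vram_size - PSP_RUNTIME_DB_OFFSET), immediately followed by a
 * directory whose entries describe the type, size and header-relative offset
 * of each payload (boot config, SCPM status, ...).
 */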
/*
 * Helper function to query psp runtime database entry
 *
 * @adev: amdgpu_device pointer
 * @entry_type: the type of psp runtime database entry
 * @db_entry: runtime database entry pointer
 *
 * Return false if the runtime database doesn't exist or the entry is invalid,
 * or true if the specific database entry is found and copied to @db_entry
 */
static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
				     enum psp_runtime_entry_type entry_type,
				     void *db_entry)
{
	uint64_t db_header_pos, db_dir_pos;
	struct psp_runtime_data_header db_header = {0};
	struct psp_runtime_data_directory db_dir = {0};
	bool ret = false;
	int i;

	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6))
		return false;

	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);

	/* read runtime db header from vram */
	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
				  sizeof(struct psp_runtime_data_header), false);

	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
		/* runtime db doesn't exist, exit */
		dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
		return false;
	}

	/* read runtime database entry from vram */
	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
				  sizeof(struct psp_runtime_data_directory), false);

	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
		/* invalid db entry count, exit */
		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
		return false;
	}

	/* look up for requested entry type */
	for (i = 0; i < db_dir.entry_count && !ret; i++) {
		if (db_dir.entry_list[i].entry_type == entry_type) {
			switch (entry_type) {
			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
					/* invalid db entry size */
					dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
					return false;
				}
				/* read runtime database entry */
				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
				ret = true;
				break;
			case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
					/* invalid db entry size */
					dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
					return false;
				}
				/* read runtime database entry */
				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
				ret = true;
				break;
			default:
				ret = false;
				break;
			}
		}
	}

	return ret;
}
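
/*
 * psp_sw_init() allocates the long-lived PSP buffers: the command structure,
 * a 1 MB firmware private buffer (VRAM under SR-IOV or when
 * debug_use_vram_fw_buf is set, GTT otherwise), the fence buffer and the
 * command submission buffer. psp_sw_fini() releases them again.
 */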
static int psp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;
	int ret;
	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
	struct psp_runtime_scpm_entry scpm_entry;

	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!psp->cmd) {
		dev_err(adev->dev, "Failed to allocate memory for command buffer!\n");
		return -ENOMEM;
	}

	adev->psp.xgmi_context.supports_extended_data =
		!adev->gmc.xgmi.connected_to_cpu &&
		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);

	memset(&scpm_entry, 0, sizeof(scpm_entry));
	if ((psp_get_runtime_db_entry(adev,
				      PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
				      &scpm_entry)) &&
	    (scpm_entry.scpm_status != SCPM_DISABLE)) {
		adev->scpm_enabled = true;
		adev->scpm_status = scpm_entry.scpm_status;
	} else {
		adev->scpm_enabled = false;
		adev->scpm_status = SCPM_DISABLE;
	}

	/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */

	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
	if (psp_get_runtime_db_entry(adev,
				     PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
				     &boot_cfg_entry)) {
		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
		if ((psp->boot_cfg_bitmask) &
		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
			/* If psp runtime database exists, then
			 * only enable two stage memory training
			 * when TWO_STAGE_DRAM_TRAINING bit is set
			 * in runtime database
			 */
			mem_training_ctx->enable_mem_training = true;
		}
	} else {
		/* If psp runtime database doesn't exist or is
		 * invalid, force enable two stage memory training
		 */
		mem_training_ctx->enable_mem_training = true;
	}

	if (mem_training_ctx->enable_mem_training) {
		ret = psp_memory_training_init(psp);
		if (ret) {
			dev_err(adev->dev, "Failed to initialize memory training!\n");
			return ret;
		}

		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
		if (ret) {
			dev_err(adev->dev, "Failed to process memory training!\n");
			return ret;
		}
	}

	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
				      (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
				      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fw_pri_bo,
				      &psp->fw_pri_mc_addr,
				      &psp->fw_pri_buf);
	if (ret)
		return ret;

	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fence_buf_bo,
				      &psp->fence_buf_mc_addr,
				      &psp->fence_buf);
	if (ret)
		goto failed1;

	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
				      (void **)&psp->cmd_buf_mem);
	if (ret)
		goto failed2;

	return 0;

failed2:
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
failed1:
	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	return ret;
}

static int psp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd = psp->cmd;

	psp_memory_training_fini(psp);

	amdgpu_ucode_release(&psp->sos_fw);
	amdgpu_ucode_release(&psp->asd_fw);
	amdgpu_ucode_release(&psp->ta_fw);
	amdgpu_ucode_release(&psp->cap_fw);
	amdgpu_ucode_release(&psp->toc_fw);

	kfree(cmd);
	cmd = NULL;

	psp_free_shared_bufs(psp);

	if (psp->km_ring.ring_mem)
		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
				      &psp->km_ring.ring_mem_mc_addr,
				      (void **)&psp->km_ring.ring_mem);

	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
			      (void **)&psp->cmd_buf_mem);

	return 0;
}

int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
		 uint32_t reg_val, uint32_t mask, bool check_changed)
{
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	if (psp->adev->no_hw_access)
		return 0;

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32(reg_index);
		if (check_changed) {
			if (val != reg_val)
				return 0;
		} else {
			if ((val & mask) == reg_val)
				return 0;
		}
		udelay(1);
	}

	return -ETIME;
}
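
/*
 * Same polling idea as psp_wait_for(), but on a millisecond scale: read
 * @reg_index until (value & mask) == reg_val or @msec_timeout milliseconds
 * have elapsed. It sleeps via msleep(), so it must only be called from
 * sleepable context, e.g. (illustrative values only):
 *	psp_wait_for_spirom_update(psp, reg_offset, 0x1, 0x1, 1000);
 */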
int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
			       uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
{
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	if (psp->adev->no_hw_access)
		return 0;

	for (i = 0; i < msec_timeout; i++) {
		val = RREG32(reg_index);
		if ((val & mask) == reg_val)
			return 0;
		msleep(1);
	}

	return -ETIME;
}

static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
{
	switch (cmd_id) {
	case GFX_CMD_ID_LOAD_TA:
		return "LOAD_TA";
	case GFX_CMD_ID_UNLOAD_TA:
		return "UNLOAD_TA";
	case GFX_CMD_ID_INVOKE_CMD:
		return "INVOKE_CMD";
	case GFX_CMD_ID_LOAD_ASD:
		return "LOAD_ASD";
	case GFX_CMD_ID_SETUP_TMR:
		return "SETUP_TMR";
	case GFX_CMD_ID_LOAD_IP_FW:
		return "LOAD_IP_FW";
	case GFX_CMD_ID_DESTROY_TMR:
		return "DESTROY_TMR";
	case GFX_CMD_ID_SAVE_RESTORE:
		return "SAVE_RESTORE_IP_FW";
	case GFX_CMD_ID_SETUP_VMR:
		return "SETUP_VMR";
	case GFX_CMD_ID_DESTROY_VMR:
		return "DESTROY_VMR";
	case GFX_CMD_ID_PROG_REG:
		return "PROG_REG";
	case GFX_CMD_ID_GET_FW_ATTESTATION:
		return "GET_FW_ATTESTATION";
	case GFX_CMD_ID_LOAD_TOC:
		return "ID_LOAD_TOC";
	case GFX_CMD_ID_AUTOLOAD_RLC:
		return "AUTOLOAD_RLC";
	case GFX_CMD_ID_BOOT_CFG:
		return "BOOT_CFG";
	default:
		return "UNKNOWN CMD";
	}
}
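
/*
 * Submission path for a single PSP gfx command: the caller-prepared command
 * is copied into cmd_buf_mem, a fence index is taken from fence_value, the
 * command is pushed to the KM ring and the fence buffer is polled (with HDP
 * invalidation) until the index appears, a RAS interrupt fires or the timeout
 * expires. Under SR-IOV, TEE_ERROR_NOT_SUPPORTED and PSP_ERR_UNKNOWN_COMMAND
 * responses are tolerated.
 */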
static int
psp_cmd_submit_buf(struct psp_context *psp,
		   struct amdgpu_firmware_info *ucode,
		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
	int ret;
	int index;
	int timeout = psp->adev->psp_timeout;
	bool ras_intr = false;
	bool skip_unsupport = false;

	if (psp->adev->no_hw_access)
		return 0;

	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);

	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));

	index = atomic_inc_return(&psp->fence_value);
	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
	if (ret) {
		atomic_dec(&psp->fence_value);
		goto exit;
	}

	amdgpu_device_invalidate_hdp(psp->adev, NULL);
	while (*((unsigned int *)psp->fence_buf) != index) {
		if (--timeout == 0)
			break;
		/*
		 * Shouldn't wait for timeout when err_event_athub occurs,
		 * because gpu reset thread triggered and lock resource should
		 * be released for psp resume sequence.
		 */
		ras_intr = amdgpu_ras_intr_triggered();
		if (ras_intr)
			break;
		usleep_range(10, 100);
		amdgpu_device_invalidate_hdp(psp->adev, NULL);
	}

	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
			  psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) &&
			 amdgpu_sriov_vf(psp->adev);

	memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));

	/* In some cases, the PSP response status is not 0 even though there is
	 * no problem with the submitted command; some versions of the PSP
	 * firmware simply do not write 0 to that field.
	 * So only print a warning here instead of an error during PSP
	 * initialization, and do not return -EINVAL, to avoid breaking
	 * hw_init.
	 */
	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
		if (ucode)
			dev_warn(psp->adev->dev,
				 "failed to load ucode %s(0x%X) ",
				 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
		dev_warn(psp->adev->dev,
			 "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
			 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
			 psp->cmd_buf_mem->resp.status);
		/* If any firmware (including CAP) load fails under SRIOV, it should
		 * return failure to stop the VF from initializing.
		 * Also return failure in case of timeout
		 */
		if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
			ret = -EINVAL;
			goto exit;
		}
	}

	if (ucode) {
		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
	}

exit:
	return ret;
}

static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
{
	struct psp_gfx_cmd_resp *cmd = psp->cmd;

	mutex_lock(&psp->mutex);

	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));

	return cmd;
}

static void release_psp_cmd_buf(struct psp_context *psp)
{
	mutex_unlock(&psp->mutex);
}

static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
				 struct psp_gfx_cmd_resp *cmd,
				 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
{
	struct amdgpu_device *adev = psp->adev;
	uint32_t size = 0;
	uint64_t tmr_pa = 0;

	if (tmr_bo) {
		size = amdgpu_bo_size(tmr_bo);
		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
	}

	if (amdgpu_sriov_vf(psp->adev))
		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_size = size;
	cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
	cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
	cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
}

static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint64_t pri_buf_mc, uint32_t size)
{
	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_size = size;
}

/* Issue LOAD TOC cmd to PSP to parse toc and calculate tmr size needed */
static int psp_load_toc(struct psp_context *psp,
			uint32_t *tmr_size)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	/* Copy toc to psp firmware private buffer */
	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);

	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (!ret)
		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;

	release_psp_cmd_buf(psp);

	return ret;
}

/* Set up Trusted Memory Region */
static int psp_tmr_init(struct psp_context *psp)
{
	int ret = 0;
	int tmr_size;
	void *tmr_buf;
	void **pptr;

	/*
	 * According to the HW engineers, the TMR address should be "naturally
	 * aligned", i.e. the start address should be an integer multiple of
	 * the TMR size.
	 *
	 * Note: this memory needs to be reserved until the driver
	 * is unloaded.
	 */
	tmr_size = PSP_TMR_SIZE(psp->adev);

	/* For ASICs that support RLC autoload, the PSP will parse the toc
	 * and calculate the total TMR size needed
	 */
	if (!amdgpu_sriov_vf(psp->adev) &&
	    psp->toc.start_addr &&
	    psp->toc.size_bytes &&
	    psp->fw_pri_buf) {
		ret = psp_load_toc(psp, &tmr_size);
		if (ret) {
			dev_err(psp->adev->dev, "Failed to load toc\n");
			return ret;
		}
	}

	if (!psp->tmr_bo && !psp->boot_time_tmr) {
		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
					      PSP_TMR_ALIGNMENT,
					      AMDGPU_HAS_VRAM(psp->adev) ?
					      AMDGPU_GEM_DOMAIN_VRAM :
					      AMDGPU_GEM_DOMAIN_GTT,
					      &psp->tmr_bo, &psp->tmr_mc_addr,
					      pptr);
	}

	return ret;
}

static bool psp_skip_tmr(struct psp_context *psp)
{
	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 10):
		return true;
	default:
		return false;
	}
}

static int psp_tmr_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
	 * Already set up by host driver.
	 */
	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
	if (psp->tmr_bo)
		dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
					struct psp_gfx_cmd_resp *cmd)
{
	if (amdgpu_sriov_vf(psp->adev))
		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
}

static int psp_tmr_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
	 * as TMR is not loaded at all
	 */
	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_tmr_unload_cmd_buf(psp, cmd);
	dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_tmr_terminate(struct psp_context *psp)
{
	return psp_tmr_unload(psp);
}

int psp_get_fw_attestation_records_addr(struct psp_context *psp,
					uint64_t *output_ptr)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	if (!output_ptr)
		return -EINVAL;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	if (!ret) {
		*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
			      ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
	}

	release_psp_cmd_buf(psp);

	return ret;
}
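
/*
 * Boot config accessors: GFX_CMD_ID_BOOT_CFG with BOOTCFG_CMD_GET reports
 * whether the BOOT_CONFIG_GECC bit is set, while BOOTCFG_CMD_SET writes a new
 * bitmask (used by psp_ras_initialize() to flip GECC for the next boot
 * cycle). Both are no-ops under SR-IOV.
 */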
static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
{
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (!ret) {
		*boot_cfg =
			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
	}

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
{
	int ret;
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;

	if (amdgpu_sriov_vf(adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
	cmd->cmd.boot_cfg.boot_config = boot_cfg;
	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_rl_load(struct amdgpu_device *adev)
{
	int ret;
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;

	if (!is_psp_fw_valid(psp->rl))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);

	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
	cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_spatial_partition(struct psp_context *psp, int mode)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
	cmd->cmd.cmd_spatial_part.mode = mode;

	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_asd_initialize(struct psp_context *psp)
{
	int ret;

	/* If the PSP version doesn't match the ASD version, ASD loading will
	 * fail; add a workaround to bypass it for SR-IOV for now.
	 * TODO: add a version check to make it common
	 */
	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
		return 0;

	/* bypass asd if display hardware is not available */
	if (!amdgpu_device_has_display_hardware(psp->adev) &&
	    amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
		return 0;

	psp->asd_context.mem_context.shared_mc_addr = 0;
	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
	psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;

	ret = psp_ta_load(psp, &psp->asd_context);
	if (!ret)
		psp->asd_context.initialized = true;

	return ret;
}

static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
	cmd->cmd.cmd_unload_ta.session_id = session_id;
}

int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_asd_terminate(struct psp_context *psp)
{
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->asd_context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->asd_context);
	if (!ret)
		psp->asd_context.initialized = false;

	return ret;
}

static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint32_t id, uint32_t value)
{
	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
}

int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
		    uint32_t value)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret = 0;

	if (reg >= PSP_REG_LAST)
		return -EINVAL;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (ret)
		dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);

	release_psp_cmd_buf(psp);

	return ret;
}
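
/*
 * Generic TA lifecycle used by the XGMI/RAS/HDCP/DTM/RAP wrappers below:
 * psp_ta_init_shared_buf() allocates the TA <-> host shared buffer,
 * psp_ta_load() copies the TA binary into the firmware private buffer and
 * issues LOAD_TA to obtain a session id, psp_ta_invoke() sends INVOKE_CMD
 * against that session, and psp_ta_unload() ends it.
 */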
static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				     uint64_t ta_bin_mc,
				     struct ta_context *context)
{
	cmd->cmd_id = context->ta_load_type;
	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;

	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
		lower_32_bits(context->mem_context.shared_mc_addr);
	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
		upper_32_bits(context->mem_context.shared_mc_addr);
	cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
}

int psp_ta_init_shared_buf(struct psp_context *psp,
			   struct ta_mem_context *mem_ctx)
{
	/*
	 * Allocate shared_mem_size bytes of page-aligned frame buffer (or
	 * GTT) memory for the TA <-> host shared buffer
	 */
	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT,
				       &mem_ctx->shared_bo,
				       &mem_ctx->shared_mc_addr,
				       &mem_ctx->shared_buf);
}

static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t ta_cmd_id,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
	cmd->cmd.cmd_invoke_cmd.session_id = session_id;
	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
}

int psp_ta_invoke(struct psp_context *psp,
		  uint32_t ta_cmd_id,
		  struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_ta_load(struct psp_context *psp, struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = acquire_psp_cmd_buf(psp);

	psp_copy_fw(psp, context->bin_desc.start_addr,
		    context->bin_desc.size_bytes);

	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	if (!ret)
		context->session_id = cmd->resp.session_id;

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
}

int psp_xgmi_terminate(struct psp_context *psp)
{
	int ret;
	struct amdgpu_device *adev = psp->adev;

	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
	     adev->gmc.xgmi.connected_to_cpu))
		return 0;

	if (!psp->xgmi_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->xgmi_context.context);

	psp->xgmi_context.context.initialized = false;

	return ret;
}
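
/*
 * Load (optionally) and initialize the XGMI TA session: when @load_ta is set
 * the shared buffer is created if needed and the TA is loaded first, then
 * TA_COMMAND_XGMI__INITIALIZE is sent with @set_extended_data and the
 * returned caps_flag is cached in xgmi_context.xgmi_ta_caps.
 */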
int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	if (!psp->ta_fw ||
	    !psp->xgmi_context.context.bin_desc.size_bytes ||
	    !psp->xgmi_context.context.bin_desc.start_addr)
		return -ENOENT;

	if (!load_ta)
		goto invoke;

	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	if (!psp->xgmi_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
		if (ret)
			return ret;
	}

	/* Load XGMI TA */
	ret = psp_ta_load(psp, &psp->xgmi_context.context);
	if (!ret)
		psp->xgmi_context.context.initialized = true;
	else
		return ret;

invoke:
	/* Initialize XGMI session */
	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
	xgmi_cmd->flag_extend_link_record = set_extended_data;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;

	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	/* note down the capability flag for the XGMI TA */
	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;

	return ret;
}

int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;

	/* Invoke xgmi ta to get hive id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	if (ret)
		return ret;

	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;

	return 0;
}

int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;

	/* Invoke xgmi ta to get the node id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	if (ret)
		return ret;

	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;

	return 0;
}

static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
{
	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
			IP_VERSION(13, 0, 2) &&
		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
	       amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
			IP_VERSION(13, 0, 6);
}

/*
 * Chips that support extended topology information require the driver to
 * reflect topology information in the opposite direction. This is
 * because the TA has already exceeded its link record limit and if the
 * TA holds bi-directional information, the driver would have to do
 * multiple fetches instead of just two.
 */
static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
					   struct psp_xgmi_node_info node_info)
{
	struct amdgpu_device *mirror_adev;
	struct amdgpu_hive_info *hive;
	uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
	uint64_t dst_node_id = node_info.node_id;
	uint8_t dst_num_hops = node_info.num_hops;
	uint8_t dst_num_links = node_info.num_links;

	hive = amdgpu_get_xgmi_hive(psp->adev);
	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
		struct psp_xgmi_topology_info *mirror_top_info;
		int j;

		if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
			continue;

		mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
		for (j = 0; j < mirror_top_info->num_nodes; j++) {
			if (mirror_top_info->nodes[j].node_id != src_node_id)
				continue;

			mirror_top_info->nodes[j].num_hops = dst_num_hops;
			/*
			 * prevent 0 num_links value re-reflection since reflection
			 * criteria is based on num_hops (direct or indirect).
			 */
			if (dst_num_links)
				mirror_top_info->nodes[j].num_links = dst_num_links;

			break;
		}

		break;
	}

	amdgpu_put_xgmi_hive(hive);
}
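
/*
 * The topology query below is two TA invocations: GET_TOPOLOGY_INFO fills in
 * node ids, hop counts and sharing info, then (when supported) GET_PEER_LINKS
 * or GET_EXTEND_PEER_LINKS supplies link counts and port numbers. When
 * reflection is required, the result is also mirrored into the peer's
 * topology via psp_xgmi_reflect_topology_info().
 */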
int psp_xgmi_get_topology_info(struct psp_context *psp,
			       int number_devices,
			       struct psp_xgmi_topology_info *topology,
			       bool get_extended_data)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
	int i;
	int ret;

	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
	xgmi_cmd->flag_extend_link_record = get_extended_data;

	/* Fill in the shared memory with topology information as input */
	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
	topology_info_input->num_nodes = number_devices;

	for (i = 0; i < topology_info_input->num_nodes; i++) {
		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
	}

	/* Invoke xgmi ta to get the topology information */
	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
	if (ret)
		return ret;

	/* Read the output topology information from the shared memory */
	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
	for (i = 0; i < topology->num_nodes; i++) {
		/* extended data will either be 0 or equal to non-extended data */
		if (topology_info_output->nodes[i].num_hops)
			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;

		/* non-extended data gets everything here so no need to update */
		if (!get_extended_data) {
			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
			topology->nodes[i].is_sharing_enabled =
				topology_info_output->nodes[i].is_sharing_enabled;
			topology->nodes[i].sdma_engine =
				topology_info_output->nodes[i].sdma_engine;
		}
	}

	/* Invoke xgmi ta again to get the link information */
	if (psp_xgmi_peer_link_info_supported(psp)) {
		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
		bool requires_reflection =
			(psp->xgmi_context.supports_extended_data &&
			 get_extended_data) ||
			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
				IP_VERSION(13, 0, 6);
		bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
			psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;

		/* populate the shared output buffer rather than the cmd input buffer
		 * with node_ids as the input for GET_PEER_LINKS command execution.
		 * This is required for GET_PEER_LINKS per xgmi ta implementation.
		 * The same requirement applies to the GET_EXTEND_PEER_LINKS command.
		 */
		if (ta_port_num_support) {
			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;

			for (i = 0; i < topology->num_nodes; i++)
				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;

			link_extend_info_output->num_nodes = topology->num_nodes;
			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
		} else {
			link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;

			for (i = 0; i < topology->num_nodes; i++)
				link_info_output->nodes[i].node_id = topology->nodes[i].node_id;

			link_info_output->num_nodes = topology->num_nodes;
			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
		}

		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
		if (ret)
			return ret;

		for (i = 0; i < topology->num_nodes; i++) {
			uint8_t node_num_links = ta_port_num_support ?
				link_extend_info_output->nodes[i].num_links :
				link_info_output->nodes[i].num_links;
			/* accumulate num_links on extended data */
			if (get_extended_data) {
				topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
			} else {
				topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
						topology->nodes[i].num_links : node_num_links;
			}

			/* populate the connected port num info if supported and available */
			if (ta_port_num_support && topology->nodes[i].num_links) {
				memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
				       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
			}

			/* reflect the topology information for bi-directionality */
			if (requires_reflection && topology->nodes[i].num_hops)
				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
		}
	}

	return 0;
}

int psp_xgmi_set_topology_info(struct psp_context *psp,
			       int number_devices,
			       struct psp_xgmi_topology_info *topology)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	int i;

	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
	topology_info_input->num_nodes = number_devices;

	for (i = 0; i < topology_info_input->num_nodes; i++) {
		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
		topology_info_input->nodes[i].is_sharing_enabled = 1;
		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
	}

	/* Invoke xgmi ta to set topology information */
	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
}

// ras begin
static void psp_ras_ta_check_status(struct psp_context *psp)
{
	struct ta_ras_shared_memory *ras_cmd =
		(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;

	switch (ras_cmd->ras_status) {
	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: cmd failed due to unsupported ip\n");
		break;
	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: cmd failed due to unsupported error injection\n");
		break;
	case TA_RAS_STATUS__SUCCESS:
		break;
	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
			dev_warn(psp->adev->dev,
				 "RAS WARNING: Inject error to critical region is not allowed\n");
		break;
	default:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
		break;
	}
}

int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);

	if (amdgpu_ras_intr_triggered())
		return ret;

	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
		dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
		return -EINVAL;
	}

	if (!ret) {
		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
			dev_warn(psp->adev->dev, "ECC switch disabled\n");

			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
			dev_warn(psp->adev->dev,
				 "RAS internal register access blocked\n");

		psp_ras_ta_check_status(psp);
	}

	return ret;
}

int psp_ras_enable_features(struct psp_context *psp,
			    union ta_ras_cmd_input *info, bool enable)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	if (!psp->ras_context.context.initialized)
		return -EINVAL;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	if (enable)
		ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
	else
		ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;

	ras_cmd->ras_in_message = *info;

	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
	if (ret)
		return -EINVAL;

	return 0;
}

int psp_ras_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->ras_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->ras_context.context);

	psp->ras_context.context.initialized = false;

	return ret;
}

int psp_ras_initialize(struct psp_context *psp)
{
	int ret;
	uint32_t boot_cfg = 0xFF;
	struct amdgpu_device *adev = psp->adev;
	struct ta_ras_shared_memory *ras_cmd;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
	    !adev->psp.ras_context.context.bin_desc.start_addr) {
		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
		return 0;
	}

	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
		/* query GECC enablement status from boot config
		 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
		 */
		ret = psp_boot_config_get(adev, &boot_cfg);
		if (ret)
			dev_warn(adev->dev, "PSP get boot config failed\n");

		if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
			if (!boot_cfg) {
				dev_info(adev->dev, "GECC is disabled\n");
			} else {
				/* disable GECC in next boot cycle if ras is
				 * disabled by module parameter amdgpu_ras_enable
				 * and/or amdgpu_ras_mask, or if the
				 * boot_config_get call failed
				 */
				ret = psp_boot_config_set(adev, 0);
				if (ret)
					dev_warn(adev->dev, "PSP set boot config failed\n");
				else
					dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
			}
		} else {
			if (boot_cfg == 1) {
				dev_info(adev->dev, "GECC is enabled\n");
			} else {
				/* enable GECC in next boot cycle if it is disabled
				 * in boot config, or force enable GECC if failed to
				 * get boot configuration
				 */
				ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
				if (ret)
					dev_warn(adev->dev, "PSP set boot config failed\n");
				else
					dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
			}
		}
	}

	psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
	psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	if (!psp->ras_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
		if (ret)
			return ret;
	}

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	if (amdgpu_ras_is_poison_mode_supported(adev))
		ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
		ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
	ras_cmd->ras_in_message.init_flags.xcc_mask =
		adev->gfx.xcc_mask;
	ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;

	ret = psp_ta_load(psp, &psp->ras_context.context);

	if (!ret && !ras_cmd->ras_status)
		psp->ras_context.context.initialized = true;
	else {
		if (ras_cmd->ras_status)
			dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);

		/* fail to load RAS TA */
		psp->ras_context.context.initialized = false;
	}

	return ret;
}
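
/*
 * Error injection: the caller's @instance_mask is translated to a device mask
 * for the block (GC/SDMA/VCN) and packed into the upper bits of
 * sub_block_index (AMDGPU_RAS_INST_SHIFT/MASK) for backward compatibility
 * before TA_RAS_COMMAND__TRIGGER_ERROR is sent.
 */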
int psp_ras_trigger_error(struct psp_context *psp,
			  struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
{
	struct ta_ras_shared_memory *ras_cmd;
	struct amdgpu_device *adev = psp->adev;
	int ret;
	uint32_t dev_mask;

	if (!psp->ras_context.context.initialized)
		return -EINVAL;

	switch (info->block_id) {
	case TA_RAS_BLOCK__GFX:
		dev_mask = GET_MASK(GC, instance_mask);
		break;
	case TA_RAS_BLOCK__SDMA:
		dev_mask = GET_MASK(SDMA0, instance_mask);
		break;
	case TA_RAS_BLOCK__VCN:
	case TA_RAS_BLOCK__JPEG:
		dev_mask = GET_MASK(VCN, instance_mask);
		break;
	default:
		dev_mask = instance_mask;
		break;
	}

	/* reuse sub_block_index for backward compatibility */
	dev_mask <<= AMDGPU_RAS_INST_SHIFT;
	dev_mask &= AMDGPU_RAS_INST_MASK;
	info->sub_block_index |= dev_mask;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
	ras_cmd->ras_in_message.trigger_error = *info;

	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
	if (ret)
		return -EINVAL;

	/* If err_event_athub occurs, the error injection was successful;
	 * however the return status from the TA is no longer reliable
	 */
	if (amdgpu_ras_intr_triggered())
		return 0;

	if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
		return -EACCES;
	else if (ras_cmd->ras_status)
		return -EINVAL;

	return 0;
}

int psp_ras_query_address(struct psp_context *psp,
			  struct ta_ras_query_address_input *addr_in,
			  struct ta_ras_query_address_output *addr_out)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	if (!psp->ras_context.context.initialized)
		return -EINVAL;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	ras_cmd->cmd_id = TA_RAS_COMMAND__QUERY_ADDRESS;
	ras_cmd->ras_in_message.address = *addr_in;

	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
	if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
		return -EINVAL;

	*addr_out = ras_cmd->ras_out_message.address;

	return 0;
}
// ras end

// HDCP start
static int psp_hdcp_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	/* bypass hdcp initialization if dmu is harvested */
	if (!amdgpu_device_has_display_hardware(psp->adev))
		return 0;

	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
	    !psp->hdcp_context.context.bin_desc.start_addr) {
		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
		return 0;
	}

	psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
	psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	if (!psp->hdcp_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
		if (ret)
			return ret;
	}

	ret = psp_ta_load(psp, &psp->hdcp_context.context);
	if (!ret) {
		psp->hdcp_context.context.initialized = true;
		mutex_init(&psp->hdcp_context.mutex);
	}

	return ret;
}

int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->hdcp_context.context.initialized)
		return 0;

	return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
}

static int psp_hdcp_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->hdcp_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->hdcp_context.context);

	psp->hdcp_context.context.initialized = false;

	return ret;
}
// HDCP end
initialization if dmu is harvested */ 1913 if (!amdgpu_device_has_display_hardware(psp->adev)) 1914 return 0; 1915 1916 if (!psp->dtm_context.context.bin_desc.size_bytes || 1917 !psp->dtm_context.context.bin_desc.start_addr) { 1918 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n"); 1919 return 0; 1920 } 1921 1922 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE; 1923 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1924 1925 if (!psp->dtm_context.context.mem_context.shared_buf) { 1926 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context); 1927 if (ret) 1928 return ret; 1929 } 1930 1931 ret = psp_ta_load(psp, &psp->dtm_context.context); 1932 if (!ret) { 1933 psp->dtm_context.context.initialized = true; 1934 mutex_init(&psp->dtm_context.mutex); 1935 } 1936 1937 return ret; 1938 } 1939 1940 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1941 { 1942 /* 1943 * TODO: bypass the loading in sriov for now 1944 */ 1945 if (amdgpu_sriov_vf(psp->adev)) 1946 return 0; 1947 1948 if (!psp->dtm_context.context.initialized) 1949 return 0; 1950 1951 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context); 1952 } 1953 1954 static int psp_dtm_terminate(struct psp_context *psp) 1955 { 1956 int ret; 1957 1958 /* 1959 * TODO: bypass the terminate in sriov for now 1960 */ 1961 if (amdgpu_sriov_vf(psp->adev)) 1962 return 0; 1963 1964 if (!psp->dtm_context.context.initialized) 1965 return 0; 1966 1967 ret = psp_ta_unload(psp, &psp->dtm_context.context); 1968 1969 psp->dtm_context.context.initialized = false; 1970 1971 return ret; 1972 } 1973 // DTM end 1974 1975 // RAP start 1976 static int psp_rap_initialize(struct psp_context *psp) 1977 { 1978 int ret; 1979 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS; 1980 1981 /* 1982 * TODO: bypass the initialize in sriov for now 1983 */ 1984 if (amdgpu_sriov_vf(psp->adev)) 1985 return 0; 1986 1987 if (!psp->rap_context.context.bin_desc.size_bytes || 1988 !psp->rap_context.context.bin_desc.start_addr) { 1989 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n"); 1990 return 0; 1991 } 1992 1993 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE; 1994 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1995 1996 if (!psp->rap_context.context.mem_context.shared_buf) { 1997 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context); 1998 if (ret) 1999 return ret; 2000 } 2001 2002 ret = psp_ta_load(psp, &psp->rap_context.context); 2003 if (!ret) { 2004 psp->rap_context.context.initialized = true; 2005 mutex_init(&psp->rap_context.mutex); 2006 } else 2007 return ret; 2008 2009 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status); 2010 if (ret || status != TA_RAP_STATUS__SUCCESS) { 2011 psp_rap_terminate(psp); 2012 /* free rap shared memory */ 2013 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); 2014 2015 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n", 2016 ret, status); 2017 2018 return ret; 2019 } 2020 2021 return 0; 2022 } 2023 2024 static int psp_rap_terminate(struct psp_context *psp) 2025 { 2026 int ret; 2027 2028 if (!psp->rap_context.context.initialized) 2029 return 0; 2030 2031 ret = psp_ta_unload(psp, &psp->rap_context.context); 2032 2033 psp->rap_context.context.initialized = false; 2034 2035 return ret; 2036 } 2037 2038 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status) 2039 { 2040 struct ta_rap_shared_memory 
*rap_cmd; 2041 int ret = 0; 2042 2043 if (!psp->rap_context.context.initialized) 2044 return 0; 2045 2046 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE && 2047 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0) 2048 return -EINVAL; 2049 2050 mutex_lock(&psp->rap_context.mutex); 2051 2052 rap_cmd = (struct ta_rap_shared_memory *) 2053 psp->rap_context.context.mem_context.shared_buf; 2054 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory)); 2055 2056 rap_cmd->cmd_id = ta_cmd_id; 2057 rap_cmd->validation_method_id = METHOD_A; 2058 2059 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context); 2060 if (ret) 2061 goto out_unlock; 2062 2063 if (status) 2064 *status = rap_cmd->rap_status; 2065 2066 out_unlock: 2067 mutex_unlock(&psp->rap_context.mutex); 2068 2069 return ret; 2070 } 2071 // RAP end 2072 2073 /* securedisplay start */ 2074 static int psp_securedisplay_initialize(struct psp_context *psp) 2075 { 2076 int ret; 2077 struct ta_securedisplay_cmd *securedisplay_cmd; 2078 2079 /* 2080 * TODO: bypass the initialize in sriov for now 2081 */ 2082 if (amdgpu_sriov_vf(psp->adev)) 2083 return 0; 2084 2085 /* bypass securedisplay initialization if dmu is harvested */ 2086 if (!amdgpu_device_has_display_hardware(psp->adev)) 2087 return 0; 2088 2089 if (!psp->securedisplay_context.context.bin_desc.size_bytes || 2090 !psp->securedisplay_context.context.bin_desc.start_addr) { 2091 dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n"); 2092 return 0; 2093 } 2094 2095 psp->securedisplay_context.context.mem_context.shared_mem_size = 2096 PSP_SECUREDISPLAY_SHARED_MEM_SIZE; 2097 psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2098 2099 if (!psp->securedisplay_context.context.initialized) { 2100 ret = psp_ta_init_shared_buf(psp, 2101 &psp->securedisplay_context.context.mem_context); 2102 if (ret) 2103 return ret; 2104 } 2105 2106 ret = psp_ta_load(psp, &psp->securedisplay_context.context); 2107 if (!ret) { 2108 psp->securedisplay_context.context.initialized = true; 2109 mutex_init(&psp->securedisplay_context.mutex); 2110 } else 2111 return ret; 2112 2113 mutex_lock(&psp->securedisplay_context.mutex); 2114 2115 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, 2116 TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2117 2118 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2119 2120 mutex_unlock(&psp->securedisplay_context.mutex); 2121 2122 if (ret) { 2123 psp_securedisplay_terminate(psp); 2124 /* free securedisplay shared memory */ 2125 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); 2126 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n"); 2127 return -EINVAL; 2128 } 2129 2130 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { 2131 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); 2132 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. 
ret 0x%x\n", 2133 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret); 2134 /* don't try again */ 2135 psp->securedisplay_context.context.bin_desc.size_bytes = 0; 2136 } 2137 2138 return 0; 2139 } 2140 2141 static int psp_securedisplay_terminate(struct psp_context *psp) 2142 { 2143 int ret; 2144 2145 /* 2146 * TODO:bypass the terminate in sriov for now 2147 */ 2148 if (amdgpu_sriov_vf(psp->adev)) 2149 return 0; 2150 2151 if (!psp->securedisplay_context.context.initialized) 2152 return 0; 2153 2154 ret = psp_ta_unload(psp, &psp->securedisplay_context.context); 2155 2156 psp->securedisplay_context.context.initialized = false; 2157 2158 return ret; 2159 } 2160 2161 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2162 { 2163 int ret; 2164 2165 if (!psp->securedisplay_context.context.initialized) 2166 return -EINVAL; 2167 2168 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA && 2169 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC) 2170 return -EINVAL; 2171 2172 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context); 2173 2174 return ret; 2175 } 2176 /* SECUREDISPLAY end */ 2177 2178 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev) 2179 { 2180 struct psp_context *psp = &adev->psp; 2181 int ret = 0; 2182 2183 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL) 2184 ret = psp->funcs->wait_for_bootloader(psp); 2185 2186 return ret; 2187 } 2188 2189 bool amdgpu_psp_get_ras_capability(struct psp_context *psp) 2190 { 2191 if (psp->funcs && 2192 psp->funcs->get_ras_capability) { 2193 return psp->funcs->get_ras_capability(psp); 2194 } else { 2195 return false; 2196 } 2197 } 2198 2199 static int psp_hw_start(struct psp_context *psp) 2200 { 2201 struct amdgpu_device *adev = psp->adev; 2202 int ret; 2203 2204 if (!amdgpu_sriov_vf(adev)) { 2205 if ((is_psp_fw_valid(psp->kdb)) && 2206 (psp->funcs->bootloader_load_kdb != NULL)) { 2207 ret = psp_bootloader_load_kdb(psp); 2208 if (ret) { 2209 dev_err(adev->dev, "PSP load kdb failed!\n"); 2210 return ret; 2211 } 2212 } 2213 2214 if ((is_psp_fw_valid(psp->spl)) && 2215 (psp->funcs->bootloader_load_spl != NULL)) { 2216 ret = psp_bootloader_load_spl(psp); 2217 if (ret) { 2218 dev_err(adev->dev, "PSP load spl failed!\n"); 2219 return ret; 2220 } 2221 } 2222 2223 if ((is_psp_fw_valid(psp->sys)) && 2224 (psp->funcs->bootloader_load_sysdrv != NULL)) { 2225 ret = psp_bootloader_load_sysdrv(psp); 2226 if (ret) { 2227 dev_err(adev->dev, "PSP load sys drv failed!\n"); 2228 return ret; 2229 } 2230 } 2231 2232 if ((is_psp_fw_valid(psp->soc_drv)) && 2233 (psp->funcs->bootloader_load_soc_drv != NULL)) { 2234 ret = psp_bootloader_load_soc_drv(psp); 2235 if (ret) { 2236 dev_err(adev->dev, "PSP load soc drv failed!\n"); 2237 return ret; 2238 } 2239 } 2240 2241 if ((is_psp_fw_valid(psp->intf_drv)) && 2242 (psp->funcs->bootloader_load_intf_drv != NULL)) { 2243 ret = psp_bootloader_load_intf_drv(psp); 2244 if (ret) { 2245 dev_err(adev->dev, "PSP load intf drv failed!\n"); 2246 return ret; 2247 } 2248 } 2249 2250 if ((is_psp_fw_valid(psp->dbg_drv)) && 2251 (psp->funcs->bootloader_load_dbg_drv != NULL)) { 2252 ret = psp_bootloader_load_dbg_drv(psp); 2253 if (ret) { 2254 dev_err(adev->dev, "PSP load dbg drv failed!\n"); 2255 return ret; 2256 } 2257 } 2258 2259 if ((is_psp_fw_valid(psp->ras_drv)) && 2260 (psp->funcs->bootloader_load_ras_drv != NULL)) { 2261 ret = psp_bootloader_load_ras_drv(psp); 2262 if (ret) { 2263 dev_err(adev->dev, "PSP load ras_drv failed!\n"); 2264 
return ret; 2265 } 2266 } 2267 2268 if ((is_psp_fw_valid(psp->ipkeymgr_drv)) && 2269 (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) { 2270 ret = psp_bootloader_load_ipkeymgr_drv(psp); 2271 if (ret) { 2272 dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n"); 2273 return ret; 2274 } 2275 } 2276 2277 if ((is_psp_fw_valid(psp->sos)) && 2278 (psp->funcs->bootloader_load_sos != NULL)) { 2279 ret = psp_bootloader_load_sos(psp); 2280 if (ret) { 2281 dev_err(adev->dev, "PSP load sos failed!\n"); 2282 return ret; 2283 } 2284 } 2285 } 2286 2287 ret = psp_ring_create(psp, PSP_RING_TYPE__KM); 2288 if (ret) { 2289 dev_err(adev->dev, "PSP create ring failed!\n"); 2290 return ret; 2291 } 2292 2293 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) 2294 goto skip_pin_bo; 2295 2296 if (!psp->boot_time_tmr || psp->autoload_supported) { 2297 ret = psp_tmr_init(psp); 2298 if (ret) { 2299 dev_err(adev->dev, "PSP tmr init failed!\n"); 2300 return ret; 2301 } 2302 } 2303 2304 skip_pin_bo: 2305 /* 2306 * For ASICs with DF Cstate management centralized 2307 * to PMFW, TMR setup should be performed after PMFW 2308 * loaded and before other non-psp firmware loaded. 2309 */ 2310 if (psp->pmfw_centralized_cstate_management) { 2311 ret = psp_load_smu_fw(psp); 2312 if (ret) 2313 return ret; 2314 } 2315 2316 if (!psp->boot_time_tmr || !psp->autoload_supported) { 2317 ret = psp_tmr_load(psp); 2318 if (ret) { 2319 dev_err(adev->dev, "PSP load tmr failed!\n"); 2320 return ret; 2321 } 2322 } 2323 2324 return 0; 2325 } 2326 2327 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, 2328 enum psp_gfx_fw_type *type) 2329 { 2330 switch (ucode->ucode_id) { 2331 case AMDGPU_UCODE_ID_CAP: 2332 *type = GFX_FW_TYPE_CAP; 2333 break; 2334 case AMDGPU_UCODE_ID_SDMA0: 2335 *type = GFX_FW_TYPE_SDMA0; 2336 break; 2337 case AMDGPU_UCODE_ID_SDMA1: 2338 *type = GFX_FW_TYPE_SDMA1; 2339 break; 2340 case AMDGPU_UCODE_ID_SDMA2: 2341 *type = GFX_FW_TYPE_SDMA2; 2342 break; 2343 case AMDGPU_UCODE_ID_SDMA3: 2344 *type = GFX_FW_TYPE_SDMA3; 2345 break; 2346 case AMDGPU_UCODE_ID_SDMA4: 2347 *type = GFX_FW_TYPE_SDMA4; 2348 break; 2349 case AMDGPU_UCODE_ID_SDMA5: 2350 *type = GFX_FW_TYPE_SDMA5; 2351 break; 2352 case AMDGPU_UCODE_ID_SDMA6: 2353 *type = GFX_FW_TYPE_SDMA6; 2354 break; 2355 case AMDGPU_UCODE_ID_SDMA7: 2356 *type = GFX_FW_TYPE_SDMA7; 2357 break; 2358 case AMDGPU_UCODE_ID_CP_MES: 2359 *type = GFX_FW_TYPE_CP_MES; 2360 break; 2361 case AMDGPU_UCODE_ID_CP_MES_DATA: 2362 *type = GFX_FW_TYPE_MES_STACK; 2363 break; 2364 case AMDGPU_UCODE_ID_CP_MES1: 2365 *type = GFX_FW_TYPE_CP_MES_KIQ; 2366 break; 2367 case AMDGPU_UCODE_ID_CP_MES1_DATA: 2368 *type = GFX_FW_TYPE_MES_KIQ_STACK; 2369 break; 2370 case AMDGPU_UCODE_ID_CP_CE: 2371 *type = GFX_FW_TYPE_CP_CE; 2372 break; 2373 case AMDGPU_UCODE_ID_CP_PFP: 2374 *type = GFX_FW_TYPE_CP_PFP; 2375 break; 2376 case AMDGPU_UCODE_ID_CP_ME: 2377 *type = GFX_FW_TYPE_CP_ME; 2378 break; 2379 case AMDGPU_UCODE_ID_CP_MEC1: 2380 *type = GFX_FW_TYPE_CP_MEC; 2381 break; 2382 case AMDGPU_UCODE_ID_CP_MEC1_JT: 2383 *type = GFX_FW_TYPE_CP_MEC_ME1; 2384 break; 2385 case AMDGPU_UCODE_ID_CP_MEC2: 2386 *type = GFX_FW_TYPE_CP_MEC; 2387 break; 2388 case AMDGPU_UCODE_ID_CP_MEC2_JT: 2389 *type = GFX_FW_TYPE_CP_MEC_ME2; 2390 break; 2391 case AMDGPU_UCODE_ID_RLC_P: 2392 *type = GFX_FW_TYPE_RLC_P; 2393 break; 2394 case AMDGPU_UCODE_ID_RLC_V: 2395 *type = GFX_FW_TYPE_RLC_V; 2396 break; 2397 case AMDGPU_UCODE_ID_RLC_G: 2398 *type = GFX_FW_TYPE_RLC_G; 2399 break; 2400 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: 2401 *type = 
GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL; 2402 break; 2403 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: 2404 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM; 2405 break; 2406 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: 2407 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; 2408 break; 2409 case AMDGPU_UCODE_ID_RLC_IRAM: 2410 *type = GFX_FW_TYPE_RLC_IRAM; 2411 break; 2412 case AMDGPU_UCODE_ID_RLC_DRAM: 2413 *type = GFX_FW_TYPE_RLC_DRAM_BOOT; 2414 break; 2415 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS: 2416 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS; 2417 break; 2418 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS: 2419 *type = GFX_FW_TYPE_SE0_TAP_DELAYS; 2420 break; 2421 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS: 2422 *type = GFX_FW_TYPE_SE1_TAP_DELAYS; 2423 break; 2424 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS: 2425 *type = GFX_FW_TYPE_SE2_TAP_DELAYS; 2426 break; 2427 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS: 2428 *type = GFX_FW_TYPE_SE3_TAP_DELAYS; 2429 break; 2430 case AMDGPU_UCODE_ID_SMC: 2431 *type = GFX_FW_TYPE_SMU; 2432 break; 2433 case AMDGPU_UCODE_ID_PPTABLE: 2434 *type = GFX_FW_TYPE_PPTABLE; 2435 break; 2436 case AMDGPU_UCODE_ID_UVD: 2437 *type = GFX_FW_TYPE_UVD; 2438 break; 2439 case AMDGPU_UCODE_ID_UVD1: 2440 *type = GFX_FW_TYPE_UVD1; 2441 break; 2442 case AMDGPU_UCODE_ID_VCE: 2443 *type = GFX_FW_TYPE_VCE; 2444 break; 2445 case AMDGPU_UCODE_ID_VCN: 2446 *type = GFX_FW_TYPE_VCN; 2447 break; 2448 case AMDGPU_UCODE_ID_VCN1: 2449 *type = GFX_FW_TYPE_VCN1; 2450 break; 2451 case AMDGPU_UCODE_ID_DMCU_ERAM: 2452 *type = GFX_FW_TYPE_DMCU_ERAM; 2453 break; 2454 case AMDGPU_UCODE_ID_DMCU_INTV: 2455 *type = GFX_FW_TYPE_DMCU_ISR; 2456 break; 2457 case AMDGPU_UCODE_ID_VCN0_RAM: 2458 *type = GFX_FW_TYPE_VCN0_RAM; 2459 break; 2460 case AMDGPU_UCODE_ID_VCN1_RAM: 2461 *type = GFX_FW_TYPE_VCN1_RAM; 2462 break; 2463 case AMDGPU_UCODE_ID_DMCUB: 2464 *type = GFX_FW_TYPE_DMUB; 2465 break; 2466 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0: 2467 *type = GFX_FW_TYPE_SDMA_UCODE_TH0; 2468 break; 2469 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1: 2470 *type = GFX_FW_TYPE_SDMA_UCODE_TH1; 2471 break; 2472 case AMDGPU_UCODE_ID_IMU_I: 2473 *type = GFX_FW_TYPE_IMU_I; 2474 break; 2475 case AMDGPU_UCODE_ID_IMU_D: 2476 *type = GFX_FW_TYPE_IMU_D; 2477 break; 2478 case AMDGPU_UCODE_ID_CP_RS64_PFP: 2479 *type = GFX_FW_TYPE_RS64_PFP; 2480 break; 2481 case AMDGPU_UCODE_ID_CP_RS64_ME: 2482 *type = GFX_FW_TYPE_RS64_ME; 2483 break; 2484 case AMDGPU_UCODE_ID_CP_RS64_MEC: 2485 *type = GFX_FW_TYPE_RS64_MEC; 2486 break; 2487 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK: 2488 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK; 2489 break; 2490 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK: 2491 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK; 2492 break; 2493 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK: 2494 *type = GFX_FW_TYPE_RS64_ME_P0_STACK; 2495 break; 2496 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK: 2497 *type = GFX_FW_TYPE_RS64_ME_P1_STACK; 2498 break; 2499 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK: 2500 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK; 2501 break; 2502 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK: 2503 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK; 2504 break; 2505 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK: 2506 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK; 2507 break; 2508 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK: 2509 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK; 2510 break; 2511 case AMDGPU_UCODE_ID_VPE_CTX: 2512 *type = GFX_FW_TYPE_VPEC_FW1; 2513 break; 2514 case AMDGPU_UCODE_ID_VPE_CTL: 2515 *type = GFX_FW_TYPE_VPEC_FW2; 2516 break; 2517 case AMDGPU_UCODE_ID_VPE: 2518 *type = GFX_FW_TYPE_VPE; 2519 break; 2520 case 
AMDGPU_UCODE_ID_UMSCH_MM_UCODE: 2521 *type = GFX_FW_TYPE_UMSCH_UCODE; 2522 break; 2523 case AMDGPU_UCODE_ID_UMSCH_MM_DATA: 2524 *type = GFX_FW_TYPE_UMSCH_DATA; 2525 break; 2526 case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER: 2527 *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER; 2528 break; 2529 case AMDGPU_UCODE_ID_P2S_TABLE: 2530 *type = GFX_FW_TYPE_P2S_TABLE; 2531 break; 2532 case AMDGPU_UCODE_ID_JPEG_RAM: 2533 *type = GFX_FW_TYPE_JPEG_RAM; 2534 break; 2535 case AMDGPU_UCODE_ID_MAXIMUM: 2536 default: 2537 return -EINVAL; 2538 } 2539 2540 return 0; 2541 } 2542 2543 static void psp_print_fw_hdr(struct psp_context *psp, 2544 struct amdgpu_firmware_info *ucode) 2545 { 2546 struct amdgpu_device *adev = psp->adev; 2547 struct common_firmware_header *hdr; 2548 2549 switch (ucode->ucode_id) { 2550 case AMDGPU_UCODE_ID_SDMA0: 2551 case AMDGPU_UCODE_ID_SDMA1: 2552 case AMDGPU_UCODE_ID_SDMA2: 2553 case AMDGPU_UCODE_ID_SDMA3: 2554 case AMDGPU_UCODE_ID_SDMA4: 2555 case AMDGPU_UCODE_ID_SDMA5: 2556 case AMDGPU_UCODE_ID_SDMA6: 2557 case AMDGPU_UCODE_ID_SDMA7: 2558 hdr = (struct common_firmware_header *) 2559 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; 2560 amdgpu_ucode_print_sdma_hdr(hdr); 2561 break; 2562 case AMDGPU_UCODE_ID_CP_CE: 2563 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data; 2564 amdgpu_ucode_print_gfx_hdr(hdr); 2565 break; 2566 case AMDGPU_UCODE_ID_CP_PFP: 2567 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data; 2568 amdgpu_ucode_print_gfx_hdr(hdr); 2569 break; 2570 case AMDGPU_UCODE_ID_CP_ME: 2571 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data; 2572 amdgpu_ucode_print_gfx_hdr(hdr); 2573 break; 2574 case AMDGPU_UCODE_ID_CP_MEC1: 2575 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data; 2576 amdgpu_ucode_print_gfx_hdr(hdr); 2577 break; 2578 case AMDGPU_UCODE_ID_RLC_G: 2579 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data; 2580 amdgpu_ucode_print_rlc_hdr(hdr); 2581 break; 2582 case AMDGPU_UCODE_ID_SMC: 2583 hdr = (struct common_firmware_header *)adev->pm.fw->data; 2584 amdgpu_ucode_print_smc_hdr(hdr); 2585 break; 2586 default: 2587 break; 2588 } 2589 } 2590 2591 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp, 2592 struct amdgpu_firmware_info *ucode, 2593 struct psp_gfx_cmd_resp *cmd) 2594 { 2595 int ret; 2596 uint64_t fw_mem_mc_addr = ucode->mc_addr; 2597 2598 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 2599 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); 2600 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); 2601 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; 2602 2603 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); 2604 if (ret) 2605 dev_err(psp->adev->dev, "Unknown firmware type\n"); 2606 2607 return ret; 2608 } 2609 2610 int psp_execute_ip_fw_load(struct psp_context *psp, 2611 struct amdgpu_firmware_info *ucode) 2612 { 2613 int ret = 0; 2614 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 2615 2616 ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd); 2617 if (!ret) { 2618 ret = psp_cmd_submit_buf(psp, ucode, cmd, 2619 psp->fence_buf_mc_addr); 2620 } 2621 2622 release_psp_cmd_buf(psp); 2623 2624 return ret; 2625 } 2626 2627 static int psp_load_p2s_table(struct psp_context *psp) 2628 { 2629 int ret; 2630 struct amdgpu_device *adev = psp->adev; 2631 struct amdgpu_firmware_info *ucode = 2632 &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE]; 2633 2634 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || 2635 (adev->pm.rpm_mode 
== AMDGPU_RUNPM_BAMACO))) 2636 return 0; 2637 2638 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) { 2639 uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D : 2640 0x0036003C; 2641 if (psp->sos.fw_version < supp_vers) 2642 return 0; 2643 } 2644 2645 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2646 return 0; 2647 2648 ret = psp_execute_ip_fw_load(psp, ucode); 2649 2650 return ret; 2651 } 2652 2653 static int psp_load_smu_fw(struct psp_context *psp) 2654 { 2655 int ret; 2656 struct amdgpu_device *adev = psp->adev; 2657 struct amdgpu_firmware_info *ucode = 2658 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 2659 struct amdgpu_ras *ras = psp->ras_context.ras; 2660 2661 /* 2662 * Skip SMU FW reloading in case of using BACO for runpm only, 2663 * as SMU is always alive. 2664 */ 2665 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || 2666 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO))) 2667 return 0; 2668 2669 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2670 return 0; 2671 2672 if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled && 2673 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) || 2674 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) { 2675 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); 2676 if (ret) 2677 dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n"); 2678 } 2679 2680 ret = psp_execute_ip_fw_load(psp, ucode); 2681 2682 if (ret) 2683 dev_err(adev->dev, "PSP load smu failed!\n"); 2684 2685 return ret; 2686 } 2687 2688 static bool fw_load_skip_check(struct psp_context *psp, 2689 struct amdgpu_firmware_info *ucode) 2690 { 2691 if (!ucode->fw || !ucode->ucode_size) 2692 return true; 2693 2694 if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE) 2695 return true; 2696 2697 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 2698 (psp_smu_reload_quirk(psp) || 2699 psp->autoload_supported || 2700 psp->pmfw_centralized_cstate_management)) 2701 return true; 2702 2703 if (amdgpu_sriov_vf(psp->adev) && 2704 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id)) 2705 return true; 2706 2707 if (psp->autoload_supported && 2708 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || 2709 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) 2710 /* skip mec JT when autoload is enabled */ 2711 return true; 2712 2713 return false; 2714 } 2715 2716 int psp_load_fw_list(struct psp_context *psp, 2717 struct amdgpu_firmware_info **ucode_list, int ucode_count) 2718 { 2719 int ret = 0, i; 2720 struct amdgpu_firmware_info *ucode; 2721 2722 for (i = 0; i < ucode_count; ++i) { 2723 ucode = ucode_list[i]; 2724 psp_print_fw_hdr(psp, ucode); 2725 ret = psp_execute_ip_fw_load(psp, ucode); 2726 if (ret) 2727 return ret; 2728 } 2729 return ret; 2730 } 2731 2732 static int psp_load_non_psp_fw(struct psp_context *psp) 2733 { 2734 int i, ret; 2735 struct amdgpu_firmware_info *ucode; 2736 struct amdgpu_device *adev = psp->adev; 2737 2738 if (psp->autoload_supported && 2739 !psp->pmfw_centralized_cstate_management) { 2740 ret = psp_load_smu_fw(psp); 2741 if (ret) 2742 return ret; 2743 } 2744 2745 /* Load P2S table first if it's available */ 2746 psp_load_p2s_table(psp); 2747 2748 for (i = 0; i < adev->firmware.max_ucodes; i++) { 2749 ucode = &adev->firmware.ucode[i]; 2750 2751 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 2752 !fw_load_skip_check(psp, ucode)) { 2753 ret = psp_load_smu_fw(psp); 2754 if (ret) 2755 return ret; 2756 continue; 2757 } 2758 2759 if (fw_load_skip_check(psp, ucode)) 2760 continue; 2761 2762 if 
(psp->autoload_supported &&
		    (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
		     IP_VERSION(11, 0, 7) ||
		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
		     IP_VERSION(11, 0, 11) ||
		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
		     IP_VERSION(11, 0, 12)) &&
		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
			/* PSP only receives one SDMA fw for sienna_cichlid,
			 * as all four SDMA fw are the same
			 */
			continue;

		psp_print_fw_hdr(psp, ucode);

		ret = psp_execute_ip_fw_load(psp, ucode);
		if (ret)
			return ret;

		/* Start rlc autoload after psp received all the gfx firmware */
		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
		    adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
			ret = psp_rlc_autoload_start(psp);
			if (ret) {
				dev_err(adev->dev, "Failed to start rlc autoload\n");
				return ret;
			}
		}
	}

	return 0;
}

static int psp_load_fw(struct amdgpu_device *adev)
{
	int ret;
	struct psp_context *psp = &adev->psp;

	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
		/* should not destroy ring, only stop */
		psp_ring_stop(psp, PSP_RING_TYPE__KM);
	} else {
		memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);

		ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
		if (ret) {
			dev_err(adev->dev, "PSP ring init failed!\n");
			goto failed;
		}
	}

	ret = psp_hw_start(psp);
	if (ret)
		goto failed;

	ret = psp_load_non_psp_fw(psp);
	if (ret)
		goto failed1;

	ret = psp_asd_initialize(psp);
	if (ret) {
		dev_err(adev->dev, "PSP load asd failed!\n");
		goto failed1;
	}

	ret = psp_rl_load(adev);
	if (ret) {
		dev_err(adev->dev, "PSP load RL failed!\n");
		goto failed1;
	}

	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
		if (adev->gmc.xgmi.num_physical_nodes > 1) {
			ret = psp_xgmi_initialize(psp, false, true);
			/* Warn about an XGMI session initialization failure
			 * instead of stopping driver initialization
			 */
			if (ret)
				dev_err(psp->adev->dev,
					"XGMI: Failed to initialize XGMI session\n");
		}
	}

	if (psp->ta_fw) {
		ret = psp_ras_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAS: Failed to initialize RAS\n");

		ret = psp_hdcp_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"HDCP: Failed to initialize HDCP\n");

		ret = psp_dtm_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"DTM: Failed to initialize DTM\n");

		ret = psp_rap_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAP: Failed to initialize RAP\n");

		ret = psp_securedisplay_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
	}

	return 0;

failed1:
	psp_free_shared_bufs(psp);
failed:
	/*
	 * all cleanup jobs (xgmi terminate, ras terminate,
	 * ring destroy, cmd/fence/fw buffers destroy,
	 * psp->cmd destroy) are delayed to psp_hw_fini
	 */
	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
	return ret;
}

static int psp_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mutex_lock(&adev->firmware.mutex);
	/*
	 * This sequence is just
used on hw_init only once, no need on 2896 * resume. 2897 */ 2898 ret = amdgpu_ucode_init_bo(adev); 2899 if (ret) 2900 goto failed; 2901 2902 ret = psp_load_fw(adev); 2903 if (ret) { 2904 dev_err(adev->dev, "PSP firmware loading failed\n"); 2905 goto failed; 2906 } 2907 2908 mutex_unlock(&adev->firmware.mutex); 2909 return 0; 2910 2911 failed: 2912 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT; 2913 mutex_unlock(&adev->firmware.mutex); 2914 return -EINVAL; 2915 } 2916 2917 static int psp_hw_fini(void *handle) 2918 { 2919 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2920 struct psp_context *psp = &adev->psp; 2921 2922 if (psp->ta_fw) { 2923 psp_ras_terminate(psp); 2924 psp_securedisplay_terminate(psp); 2925 psp_rap_terminate(psp); 2926 psp_dtm_terminate(psp); 2927 psp_hdcp_terminate(psp); 2928 2929 if (adev->gmc.xgmi.num_physical_nodes > 1) 2930 psp_xgmi_terminate(psp); 2931 } 2932 2933 psp_asd_terminate(psp); 2934 psp_tmr_terminate(psp); 2935 2936 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 2937 2938 return 0; 2939 } 2940 2941 static int psp_suspend(void *handle) 2942 { 2943 int ret = 0; 2944 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2945 struct psp_context *psp = &adev->psp; 2946 2947 if (adev->gmc.xgmi.num_physical_nodes > 1 && 2948 psp->xgmi_context.context.initialized) { 2949 ret = psp_xgmi_terminate(psp); 2950 if (ret) { 2951 dev_err(adev->dev, "Failed to terminate xgmi ta\n"); 2952 goto out; 2953 } 2954 } 2955 2956 if (psp->ta_fw) { 2957 ret = psp_ras_terminate(psp); 2958 if (ret) { 2959 dev_err(adev->dev, "Failed to terminate ras ta\n"); 2960 goto out; 2961 } 2962 ret = psp_hdcp_terminate(psp); 2963 if (ret) { 2964 dev_err(adev->dev, "Failed to terminate hdcp ta\n"); 2965 goto out; 2966 } 2967 ret = psp_dtm_terminate(psp); 2968 if (ret) { 2969 dev_err(adev->dev, "Failed to terminate dtm ta\n"); 2970 goto out; 2971 } 2972 ret = psp_rap_terminate(psp); 2973 if (ret) { 2974 dev_err(adev->dev, "Failed to terminate rap ta\n"); 2975 goto out; 2976 } 2977 ret = psp_securedisplay_terminate(psp); 2978 if (ret) { 2979 dev_err(adev->dev, "Failed to terminate securedisplay ta\n"); 2980 goto out; 2981 } 2982 } 2983 2984 ret = psp_asd_terminate(psp); 2985 if (ret) { 2986 dev_err(adev->dev, "Failed to terminate asd\n"); 2987 goto out; 2988 } 2989 2990 ret = psp_tmr_terminate(psp); 2991 if (ret) { 2992 dev_err(adev->dev, "Failed to terminate tmr\n"); 2993 goto out; 2994 } 2995 2996 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); 2997 if (ret) 2998 dev_err(adev->dev, "PSP ring stop failed\n"); 2999 3000 out: 3001 return ret; 3002 } 3003 3004 static int psp_resume(void *handle) 3005 { 3006 int ret; 3007 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3008 struct psp_context *psp = &adev->psp; 3009 3010 dev_info(adev->dev, "PSP is resuming...\n"); 3011 3012 if (psp->mem_train_ctx.enable_mem_training) { 3013 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); 3014 if (ret) { 3015 dev_err(adev->dev, "Failed to process memory training!\n"); 3016 return ret; 3017 } 3018 } 3019 3020 mutex_lock(&adev->firmware.mutex); 3021 3022 ret = psp_hw_start(psp); 3023 if (ret) 3024 goto failed; 3025 3026 ret = psp_load_non_psp_fw(psp); 3027 if (ret) 3028 goto failed; 3029 3030 ret = psp_asd_initialize(psp); 3031 if (ret) { 3032 dev_err(adev->dev, "PSP load asd failed!\n"); 3033 goto failed; 3034 } 3035 3036 ret = psp_rl_load(adev); 3037 if (ret) { 3038 dev_err(adev->dev, "PSP load RL failed!\n"); 3039 goto failed; 3040 } 3041 3042 if (adev->gmc.xgmi.num_physical_nodes > 1) 
{ 3043 ret = psp_xgmi_initialize(psp, false, true); 3044 /* Warning the XGMI seesion initialize failure 3045 * Instead of stop driver initialization 3046 */ 3047 if (ret) 3048 dev_err(psp->adev->dev, 3049 "XGMI: Failed to initialize XGMI session\n"); 3050 } 3051 3052 if (psp->ta_fw) { 3053 ret = psp_ras_initialize(psp); 3054 if (ret) 3055 dev_err(psp->adev->dev, 3056 "RAS: Failed to initialize RAS\n"); 3057 3058 ret = psp_hdcp_initialize(psp); 3059 if (ret) 3060 dev_err(psp->adev->dev, 3061 "HDCP: Failed to initialize HDCP\n"); 3062 3063 ret = psp_dtm_initialize(psp); 3064 if (ret) 3065 dev_err(psp->adev->dev, 3066 "DTM: Failed to initialize DTM\n"); 3067 3068 ret = psp_rap_initialize(psp); 3069 if (ret) 3070 dev_err(psp->adev->dev, 3071 "RAP: Failed to initialize RAP\n"); 3072 3073 ret = psp_securedisplay_initialize(psp); 3074 if (ret) 3075 dev_err(psp->adev->dev, 3076 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 3077 } 3078 3079 mutex_unlock(&adev->firmware.mutex); 3080 3081 return 0; 3082 3083 failed: 3084 dev_err(adev->dev, "PSP resume failed\n"); 3085 mutex_unlock(&adev->firmware.mutex); 3086 return ret; 3087 } 3088 3089 int psp_gpu_reset(struct amdgpu_device *adev) 3090 { 3091 int ret; 3092 3093 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 3094 return 0; 3095 3096 mutex_lock(&adev->psp.mutex); 3097 ret = psp_mode1_reset(&adev->psp); 3098 mutex_unlock(&adev->psp.mutex); 3099 3100 return ret; 3101 } 3102 3103 int psp_rlc_autoload_start(struct psp_context *psp) 3104 { 3105 int ret; 3106 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 3107 3108 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC; 3109 3110 ret = psp_cmd_submit_buf(psp, NULL, cmd, 3111 psp->fence_buf_mc_addr); 3112 3113 release_psp_cmd_buf(psp); 3114 3115 return ret; 3116 } 3117 3118 int psp_ring_cmd_submit(struct psp_context *psp, 3119 uint64_t cmd_buf_mc_addr, 3120 uint64_t fence_mc_addr, 3121 int index) 3122 { 3123 unsigned int psp_write_ptr_reg = 0; 3124 struct psp_gfx_rb_frame *write_frame; 3125 struct psp_ring *ring = &psp->km_ring; 3126 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; 3127 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + 3128 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; 3129 struct amdgpu_device *adev = psp->adev; 3130 uint32_t ring_size_dw = ring->ring_size / 4; 3131 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; 3132 3133 /* KM (GPCOM) prepare write pointer */ 3134 psp_write_ptr_reg = psp_ring_get_wptr(psp); 3135 3136 /* Update KM RB frame pointer to new frame */ 3137 /* write_frame ptr increments by size of rb_frame in bytes */ 3138 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ 3139 if ((psp_write_ptr_reg % ring_size_dw) == 0) 3140 write_frame = ring_buffer_start; 3141 else 3142 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); 3143 /* Check invalid write_frame ptr address */ 3144 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { 3145 dev_err(adev->dev, 3146 "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", 3147 ring_buffer_start, ring_buffer_end, write_frame); 3148 dev_err(adev->dev, 3149 "write_frame is pointing to address out of bounds\n"); 3150 return -EINVAL; 3151 } 3152 3153 /* Initialize KM RB frame */ 3154 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); 3155 3156 /* Update KM RB frame */ 3157 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); 3158 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); 3159 
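	/*
	 * Descriptive note: the fence address/value filled in below identify
	 * this submission. The caller supplies a monotonically increasing
	 * index and polls its fence buffer for that value to detect
	 * completion of this ring-buffer frame.
	 */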
write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); 3160 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); 3161 write_frame->fence_value = index; 3162 amdgpu_device_flush_hdp(adev, NULL); 3163 3164 /* Update the write Pointer in DWORDs */ 3165 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; 3166 psp_ring_set_wptr(psp, psp_write_ptr_reg); 3167 return 0; 3168 } 3169 3170 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name) 3171 { 3172 struct amdgpu_device *adev = psp->adev; 3173 char fw_name[PSP_FW_NAME_LEN]; 3174 const struct psp_firmware_header_v1_0 *asd_hdr; 3175 int err = 0; 3176 3177 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); 3178 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name); 3179 if (err) 3180 goto out; 3181 3182 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; 3183 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version); 3184 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version); 3185 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes); 3186 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr + 3187 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); 3188 return 0; 3189 out: 3190 amdgpu_ucode_release(&adev->psp.asd_fw); 3191 return err; 3192 } 3193 3194 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name) 3195 { 3196 struct amdgpu_device *adev = psp->adev; 3197 char fw_name[PSP_FW_NAME_LEN]; 3198 const struct psp_firmware_header_v1_0 *toc_hdr; 3199 int err = 0; 3200 3201 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name); 3202 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name); 3203 if (err) 3204 goto out; 3205 3206 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; 3207 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); 3208 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); 3209 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); 3210 adev->psp.toc.start_addr = (uint8_t *)toc_hdr + 3211 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); 3212 return 0; 3213 out: 3214 amdgpu_ucode_release(&adev->psp.toc_fw); 3215 return err; 3216 } 3217 3218 static int parse_sos_bin_descriptor(struct psp_context *psp, 3219 const struct psp_fw_bin_desc *desc, 3220 const struct psp_firmware_header_v2_0 *sos_hdr) 3221 { 3222 uint8_t *ucode_start_addr = NULL; 3223 3224 if (!psp || !desc || !sos_hdr) 3225 return -EINVAL; 3226 3227 ucode_start_addr = (uint8_t *)sos_hdr + 3228 le32_to_cpu(desc->offset_bytes) + 3229 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3230 3231 switch (desc->fw_type) { 3232 case PSP_FW_TYPE_PSP_SOS: 3233 psp->sos.fw_version = le32_to_cpu(desc->fw_version); 3234 psp->sos.feature_version = le32_to_cpu(desc->fw_version); 3235 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes); 3236 psp->sos.start_addr = ucode_start_addr; 3237 break; 3238 case PSP_FW_TYPE_PSP_SYS_DRV: 3239 psp->sys.fw_version = le32_to_cpu(desc->fw_version); 3240 psp->sys.feature_version = le32_to_cpu(desc->fw_version); 3241 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes); 3242 psp->sys.start_addr = ucode_start_addr; 3243 break; 3244 case PSP_FW_TYPE_PSP_KDB: 3245 psp->kdb.fw_version = le32_to_cpu(desc->fw_version); 3246 psp->kdb.feature_version = le32_to_cpu(desc->fw_version); 3247 psp->kdb.size_bytes = 
le32_to_cpu(desc->size_bytes); 3248 psp->kdb.start_addr = ucode_start_addr; 3249 break; 3250 case PSP_FW_TYPE_PSP_TOC: 3251 psp->toc.fw_version = le32_to_cpu(desc->fw_version); 3252 psp->toc.feature_version = le32_to_cpu(desc->fw_version); 3253 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes); 3254 psp->toc.start_addr = ucode_start_addr; 3255 break; 3256 case PSP_FW_TYPE_PSP_SPL: 3257 psp->spl.fw_version = le32_to_cpu(desc->fw_version); 3258 psp->spl.feature_version = le32_to_cpu(desc->fw_version); 3259 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes); 3260 psp->spl.start_addr = ucode_start_addr; 3261 break; 3262 case PSP_FW_TYPE_PSP_RL: 3263 psp->rl.fw_version = le32_to_cpu(desc->fw_version); 3264 psp->rl.feature_version = le32_to_cpu(desc->fw_version); 3265 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes); 3266 psp->rl.start_addr = ucode_start_addr; 3267 break; 3268 case PSP_FW_TYPE_PSP_SOC_DRV: 3269 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version); 3270 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version); 3271 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3272 psp->soc_drv.start_addr = ucode_start_addr; 3273 break; 3274 case PSP_FW_TYPE_PSP_INTF_DRV: 3275 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version); 3276 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version); 3277 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3278 psp->intf_drv.start_addr = ucode_start_addr; 3279 break; 3280 case PSP_FW_TYPE_PSP_DBG_DRV: 3281 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version); 3282 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version); 3283 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3284 psp->dbg_drv.start_addr = ucode_start_addr; 3285 break; 3286 case PSP_FW_TYPE_PSP_RAS_DRV: 3287 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version); 3288 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version); 3289 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3290 psp->ras_drv.start_addr = ucode_start_addr; 3291 break; 3292 case PSP_FW_TYPE_PSP_IPKEYMGR_DRV: 3293 psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version); 3294 psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version); 3295 psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3296 psp->ipkeymgr_drv.start_addr = ucode_start_addr; 3297 break; 3298 default: 3299 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type); 3300 break; 3301 } 3302 3303 return 0; 3304 } 3305 3306 static int psp_init_sos_base_fw(struct amdgpu_device *adev) 3307 { 3308 const struct psp_firmware_header_v1_0 *sos_hdr; 3309 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3310 uint8_t *ucode_array_start_addr; 3311 3312 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3313 ucode_array_start_addr = (uint8_t *)sos_hdr + 3314 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3315 3316 if (adev->gmc.xgmi.connected_to_cpu || 3317 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) { 3318 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version); 3319 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version); 3320 3321 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes); 3322 adev->psp.sys.start_addr = ucode_array_start_addr; 3323 3324 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes); 3325 adev->psp.sos.start_addr = ucode_array_start_addr + 3326 le32_to_cpu(sos_hdr->sos.offset_bytes); 3327 } else { 3328 /* Load alternate PSP SOS FW */ 3329 
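		/*
		 * Alternate path: MP0 v13.0.2 parts that are not connected to
		 * the CPU take their SYS_DRV/SOS images from the aux entries
		 * of the v1_3 header instead of the default entries parsed in
		 * the branch above.
		 */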
sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3330 3331 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3332 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3333 3334 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes); 3335 adev->psp.sys.start_addr = ucode_array_start_addr + 3336 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes); 3337 3338 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes); 3339 adev->psp.sos.start_addr = ucode_array_start_addr + 3340 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes); 3341 } 3342 3343 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) { 3344 dev_warn(adev->dev, "PSP SOS FW not available"); 3345 return -EINVAL; 3346 } 3347 3348 return 0; 3349 } 3350 3351 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) 3352 { 3353 struct amdgpu_device *adev = psp->adev; 3354 char fw_name[PSP_FW_NAME_LEN]; 3355 const struct psp_firmware_header_v1_0 *sos_hdr; 3356 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1; 3357 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; 3358 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3359 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0; 3360 int err = 0; 3361 uint8_t *ucode_array_start_addr; 3362 int fw_index = 0; 3363 3364 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name); 3365 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, fw_name); 3366 if (err) 3367 goto out; 3368 3369 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3370 ucode_array_start_addr = (uint8_t *)sos_hdr + 3371 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3372 amdgpu_ucode_print_psp_hdr(&sos_hdr->header); 3373 3374 switch (sos_hdr->header.header_version_major) { 3375 case 1: 3376 err = psp_init_sos_base_fw(adev); 3377 if (err) 3378 goto out; 3379 3380 if (sos_hdr->header.header_version_minor == 1) { 3381 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; 3382 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes); 3383 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3384 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes); 3385 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes); 3386 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3387 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes); 3388 } 3389 if (sos_hdr->header.header_version_minor == 2) { 3390 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; 3391 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes); 3392 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3393 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes); 3394 } 3395 if (sos_hdr->header.header_version_minor == 3) { 3396 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3397 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes); 3398 adev->psp.toc.start_addr = ucode_array_start_addr + 3399 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes); 3400 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes); 3401 adev->psp.kdb.start_addr = ucode_array_start_addr + 3402 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes); 3403 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes); 3404 adev->psp.spl.start_addr = ucode_array_start_addr + 3405 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes); 3406 
adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes); 3407 adev->psp.rl.start_addr = ucode_array_start_addr + 3408 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes); 3409 } 3410 break; 3411 case 2: 3412 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data; 3413 3414 if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) { 3415 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n"); 3416 err = -EINVAL; 3417 goto out; 3418 } 3419 3420 for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) { 3421 err = parse_sos_bin_descriptor(psp, 3422 &sos_hdr_v2_0->psp_fw_bin[fw_index], 3423 sos_hdr_v2_0); 3424 if (err) 3425 goto out; 3426 } 3427 break; 3428 default: 3429 dev_err(adev->dev, 3430 "unsupported psp sos firmware\n"); 3431 err = -EINVAL; 3432 goto out; 3433 } 3434 3435 return 0; 3436 out: 3437 amdgpu_ucode_release(&adev->psp.sos_fw); 3438 3439 return err; 3440 } 3441 3442 static int parse_ta_bin_descriptor(struct psp_context *psp, 3443 const struct psp_fw_bin_desc *desc, 3444 const struct ta_firmware_header_v2_0 *ta_hdr) 3445 { 3446 uint8_t *ucode_start_addr = NULL; 3447 3448 if (!psp || !desc || !ta_hdr) 3449 return -EINVAL; 3450 3451 ucode_start_addr = (uint8_t *)ta_hdr + 3452 le32_to_cpu(desc->offset_bytes) + 3453 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3454 3455 switch (desc->fw_type) { 3456 case TA_FW_TYPE_PSP_ASD: 3457 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3458 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version); 3459 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3460 psp->asd_context.bin_desc.start_addr = ucode_start_addr; 3461 break; 3462 case TA_FW_TYPE_PSP_XGMI: 3463 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3464 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3465 psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr; 3466 break; 3467 case TA_FW_TYPE_PSP_RAS: 3468 psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3469 psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3470 psp->ras_context.context.bin_desc.start_addr = ucode_start_addr; 3471 break; 3472 case TA_FW_TYPE_PSP_HDCP: 3473 psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3474 psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3475 psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr; 3476 break; 3477 case TA_FW_TYPE_PSP_DTM: 3478 psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3479 psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3480 psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr; 3481 break; 3482 case TA_FW_TYPE_PSP_RAP: 3483 psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3484 psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3485 psp->rap_context.context.bin_desc.start_addr = ucode_start_addr; 3486 break; 3487 case TA_FW_TYPE_PSP_SECUREDISPLAY: 3488 psp->securedisplay_context.context.bin_desc.fw_version = 3489 le32_to_cpu(desc->fw_version); 3490 psp->securedisplay_context.context.bin_desc.size_bytes = 3491 le32_to_cpu(desc->size_bytes); 3492 psp->securedisplay_context.context.bin_desc.start_addr = 3493 ucode_start_addr; 3494 break; 3495 default: 3496 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", 
desc->fw_type); 3497 break; 3498 } 3499 3500 return 0; 3501 } 3502 3503 static int parse_ta_v1_microcode(struct psp_context *psp) 3504 { 3505 const struct ta_firmware_header_v1_0 *ta_hdr; 3506 struct amdgpu_device *adev = psp->adev; 3507 3508 ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data; 3509 3510 if (le16_to_cpu(ta_hdr->header.header_version_major) != 1) 3511 return -EINVAL; 3512 3513 adev->psp.xgmi_context.context.bin_desc.fw_version = 3514 le32_to_cpu(ta_hdr->xgmi.fw_version); 3515 adev->psp.xgmi_context.context.bin_desc.size_bytes = 3516 le32_to_cpu(ta_hdr->xgmi.size_bytes); 3517 adev->psp.xgmi_context.context.bin_desc.start_addr = 3518 (uint8_t *)ta_hdr + 3519 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3520 3521 adev->psp.ras_context.context.bin_desc.fw_version = 3522 le32_to_cpu(ta_hdr->ras.fw_version); 3523 adev->psp.ras_context.context.bin_desc.size_bytes = 3524 le32_to_cpu(ta_hdr->ras.size_bytes); 3525 adev->psp.ras_context.context.bin_desc.start_addr = 3526 (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr + 3527 le32_to_cpu(ta_hdr->ras.offset_bytes); 3528 3529 adev->psp.hdcp_context.context.bin_desc.fw_version = 3530 le32_to_cpu(ta_hdr->hdcp.fw_version); 3531 adev->psp.hdcp_context.context.bin_desc.size_bytes = 3532 le32_to_cpu(ta_hdr->hdcp.size_bytes); 3533 adev->psp.hdcp_context.context.bin_desc.start_addr = 3534 (uint8_t *)ta_hdr + 3535 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3536 3537 adev->psp.dtm_context.context.bin_desc.fw_version = 3538 le32_to_cpu(ta_hdr->dtm.fw_version); 3539 adev->psp.dtm_context.context.bin_desc.size_bytes = 3540 le32_to_cpu(ta_hdr->dtm.size_bytes); 3541 adev->psp.dtm_context.context.bin_desc.start_addr = 3542 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3543 le32_to_cpu(ta_hdr->dtm.offset_bytes); 3544 3545 adev->psp.securedisplay_context.context.bin_desc.fw_version = 3546 le32_to_cpu(ta_hdr->securedisplay.fw_version); 3547 adev->psp.securedisplay_context.context.bin_desc.size_bytes = 3548 le32_to_cpu(ta_hdr->securedisplay.size_bytes); 3549 adev->psp.securedisplay_context.context.bin_desc.start_addr = 3550 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3551 le32_to_cpu(ta_hdr->securedisplay.offset_bytes); 3552 3553 adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); 3554 3555 return 0; 3556 } 3557 3558 static int parse_ta_v2_microcode(struct psp_context *psp) 3559 { 3560 const struct ta_firmware_header_v2_0 *ta_hdr; 3561 struct amdgpu_device *adev = psp->adev; 3562 int err = 0; 3563 int ta_index = 0; 3564 3565 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data; 3566 3567 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) 3568 return -EINVAL; 3569 3570 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) { 3571 dev_err(adev->dev, "packed TA count exceeds maximum limit\n"); 3572 return -EINVAL; 3573 } 3574 3575 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) { 3576 err = parse_ta_bin_descriptor(psp, 3577 &ta_hdr->ta_fw_bin[ta_index], 3578 ta_hdr); 3579 if (err) 3580 return err; 3581 } 3582 3583 return 0; 3584 } 3585 3586 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name) 3587 { 3588 const struct common_firmware_header *hdr; 3589 struct amdgpu_device *adev = psp->adev; 3590 char fw_name[PSP_FW_NAME_LEN]; 3591 int err; 3592 3593 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); 3594 err = amdgpu_ucode_request(adev, 
&adev->psp.ta_fw, fw_name);
	if (err)
		return err;

	hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
	switch (le16_to_cpu(hdr->header_version_major)) {
	case 1:
		err = parse_ta_v1_microcode(psp);
		break;
	case 2:
		err = parse_ta_v2_microcode(psp);
		break;
	default:
		dev_err(adev->dev, "unsupported TA header version\n");
		err = -EINVAL;
	}

	if (err)
		amdgpu_ucode_release(&adev->psp.ta_fw);

	return err;
}

int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
{
	struct amdgpu_device *adev = psp->adev;
	char fw_name[PSP_FW_NAME_LEN];
	const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
	struct amdgpu_firmware_info *info = NULL;
	int err = 0;

	if (!amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
		return -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name);
	err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, fw_name);
	if (err) {
		if (err == -ENODEV) {
			dev_warn(adev->dev, "cap microcode does not exist, skip\n");
			err = 0;
			goto out;
		}
		dev_err(adev->dev, "fail to initialize cap microcode\n");
		goto out;
	}

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
	info->ucode_id = AMDGPU_UCODE_ID_CAP;
	info->fw = adev->psp.cap_fw;
	cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
		adev->psp.cap_fw->data;
	adev->firmware.fw_size += ALIGN(
		le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
	adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
	adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
	adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);

	return 0;

out:
	amdgpu_ucode_release(&adev->psp.cap_fw);
	return err;
}

static int psp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int psp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t fw_ver;
	int ret;

	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
		dev_info(adev->dev, "PSP block is not ready yet.\n");
		return -EBUSY;
	}

	mutex_lock(&adev->psp.mutex);
	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
	mutex_unlock(&adev->psp.mutex);

	if (ret) {
		dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
		return ret;
	}

	return sysfs_emit(buf, "%x\n", fw_ver);
}

static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret, idx;
	char fw_name[100];
	const struct firmware *usbc_pd_fw;
	struct amdgpu_bo *fw_buf_bo = NULL;
	uint64_t fw_pri_mc_addr;
	void *fw_pri_cpu_addr;

	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
		dev_err(adev->dev,
"PSP block is not ready yet."); 3713 return -EBUSY; 3714 } 3715 3716 if (!drm_dev_enter(ddev, &idx)) 3717 return -ENODEV; 3718 3719 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf); 3720 ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev); 3721 if (ret) 3722 goto fail; 3723 3724 /* LFB address which is aligned to 1MB boundary per PSP request */ 3725 ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000, 3726 AMDGPU_GEM_DOMAIN_VRAM | 3727 AMDGPU_GEM_DOMAIN_GTT, 3728 &fw_buf_bo, &fw_pri_mc_addr, 3729 &fw_pri_cpu_addr); 3730 if (ret) 3731 goto rel_buf; 3732 3733 memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size); 3734 3735 mutex_lock(&adev->psp.mutex); 3736 ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr); 3737 mutex_unlock(&adev->psp.mutex); 3738 3739 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr); 3740 3741 rel_buf: 3742 release_firmware(usbc_pd_fw); 3743 fail: 3744 if (ret) { 3745 dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret); 3746 count = ret; 3747 } 3748 3749 drm_dev_exit(idx); 3750 return count; 3751 } 3752 3753 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size) 3754 { 3755 int idx; 3756 3757 if (!drm_dev_enter(adev_to_drm(psp->adev), &idx)) 3758 return; 3759 3760 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 3761 memcpy(psp->fw_pri_buf, start_addr, bin_size); 3762 3763 drm_dev_exit(idx); 3764 } 3765 3766 /** 3767 * DOC: usbc_pd_fw 3768 * Reading from this file will retrieve the USB-C PD firmware version. Writing to 3769 * this file will trigger the update process. 3770 */ 3771 static DEVICE_ATTR(usbc_pd_fw, 0644, 3772 psp_usbc_pd_fw_sysfs_read, 3773 psp_usbc_pd_fw_sysfs_write); 3774 3775 int is_psp_fw_valid(struct psp_bin_desc bin) 3776 { 3777 return bin.size_bytes; 3778 } 3779 3780 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj, 3781 struct bin_attribute *bin_attr, 3782 char *buffer, loff_t pos, size_t count) 3783 { 3784 struct device *dev = kobj_to_dev(kobj); 3785 struct drm_device *ddev = dev_get_drvdata(dev); 3786 struct amdgpu_device *adev = drm_to_adev(ddev); 3787 3788 adev->psp.vbflash_done = false; 3789 3790 /* Safeguard against memory drain */ 3791 if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) { 3792 dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B); 3793 kvfree(adev->psp.vbflash_tmp_buf); 3794 adev->psp.vbflash_tmp_buf = NULL; 3795 adev->psp.vbflash_image_size = 0; 3796 return -ENOMEM; 3797 } 3798 3799 /* TODO Just allocate max for now and optimize to realloc later if needed */ 3800 if (!adev->psp.vbflash_tmp_buf) { 3801 adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL); 3802 if (!adev->psp.vbflash_tmp_buf) 3803 return -ENOMEM; 3804 } 3805 3806 mutex_lock(&adev->psp.mutex); 3807 memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count); 3808 adev->psp.vbflash_image_size += count; 3809 mutex_unlock(&adev->psp.mutex); 3810 3811 dev_dbg(adev->dev, "IFWI staged for update\n"); 3812 3813 return count; 3814 } 3815 3816 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj, 3817 struct bin_attribute *bin_attr, char *buffer, 3818 loff_t pos, size_t count) 3819 { 3820 struct device *dev = kobj_to_dev(kobj); 3821 struct drm_device *ddev = dev_get_drvdata(dev); 3822 struct amdgpu_device *adev = drm_to_adev(ddev); 3823 struct amdgpu_bo *fw_buf_bo = NULL; 3824 uint64_t fw_pri_mc_addr; 3825 void *fw_pri_cpu_addr; 3826 int ret; 3827 3828 if (adev->psp.vbflash_image_size 
int is_psp_fw_valid(struct psp_bin_desc bin)
{
	return bin.size_bytes;
}

static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
					struct bin_attribute *bin_attr,
					char *buffer, loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	adev->psp.vbflash_done = false;

	/* Safeguard against memory drain */
	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
		dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
		kvfree(adev->psp.vbflash_tmp_buf);
		adev->psp.vbflash_tmp_buf = NULL;
		adev->psp.vbflash_image_size = 0;
		return -ENOMEM;
	}

	/* TODO Just allocate max for now and optimize to realloc later if needed */
	if (!adev->psp.vbflash_tmp_buf) {
		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
		if (!adev->psp.vbflash_tmp_buf)
			return -ENOMEM;
	}

	mutex_lock(&adev->psp.mutex);
	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
	adev->psp.vbflash_image_size += count;
	mutex_unlock(&adev->psp.mutex);

	dev_dbg(adev->dev, "IFWI staged for update\n");

	return count;
}

static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
				       struct bin_attribute *bin_attr, char *buffer,
				       loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_bo *fw_buf_bo = NULL;
	uint64_t fw_pri_mc_addr;
	void *fw_pri_cpu_addr;
	int ret;

	if (adev->psp.vbflash_image_size == 0)
		return -EINVAL;

	dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");

	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &fw_buf_bo,
				      &fw_pri_mc_addr,
				      &fw_pri_cpu_addr);
	if (ret)
		goto rel_buf;

	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);

	mutex_lock(&adev->psp.mutex);
	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
	mutex_unlock(&adev->psp.mutex);

	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);

rel_buf:
	kvfree(adev->psp.vbflash_tmp_buf);
	adev->psp.vbflash_tmp_buf = NULL;
	adev->psp.vbflash_image_size = 0;

	if (ret) {
		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
		return ret;
	}

	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
	return 0;
}

/**
 * DOC: psp_vbflash
 * Writing to this file will stage an IFWI for update. Reading from this file
 * will trigger the update process.
 */
static struct bin_attribute psp_vbflash_bin_attr = {
	.attr = {.name = "psp_vbflash", .mode = 0660},
	.size = 0,
	.write = amdgpu_psp_vbflash_write,
	.read = amdgpu_psp_vbflash_read,
};

/**
 * DOC: psp_vbflash_status
 * The status of the flash process.
 * 0: IFWI flash not complete.
 * 1: IFWI flash complete.
 */
static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t vbflash_status;

	vbflash_status = psp_vbflash_status(&adev->psp);
	if (!adev->psp.vbflash_done)
		vbflash_status = 0;
	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
		vbflash_status = 1;

	return sysfs_emit(buf, "0x%x\n", vbflash_status);
}
static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);

static struct bin_attribute *bin_flash_attrs[] = {
	&psp_vbflash_bin_attr,
	NULL
};

static struct attribute *flash_attrs[] = {
	&dev_attr_psp_vbflash_status.attr,
	&dev_attr_usbc_pd_fw.attr,
	NULL
};

static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (attr == &dev_attr_usbc_pd_fw.attr)
		return adev->psp.sup_pd_fw_up ? 0660 : 0;

	return adev->psp.sup_ifwi_up ? 0440 : 0;
}

static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
						struct bin_attribute *attr,
						int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return adev->psp.sup_ifwi_up ? 0660 : 0;
}
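/*
 * Illustrative use of the IFWI flash interface (a sketch, not part of the
 * driver): the image is staged by writing it to psp_vbflash, the SPIROM
 * update is triggered by reading the same file, and completion is reported
 * through psp_vbflash_status. The card index and image file name below are
 * placeholders and depend on the system:
 *
 *   # stage the IFWI image
 *   cat <ifwi_image.bin> > /sys/class/drm/card0/device/psp_vbflash
 *   # trigger the SPIROM update
 *   cat /sys/class/drm/card0/device/psp_vbflash
 *   # poll until the status reads 0x1 (flash complete)
 *   cat /sys/class/drm/card0/device/psp_vbflash_status
 */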
const struct attribute_group amdgpu_flash_attr_group = {
	.attrs = flash_attrs,
	.bin_attrs = bin_flash_attrs,
	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
	.is_visible = amdgpu_flash_attr_is_visible,
};

const struct amd_ip_funcs psp_ip_funcs = {
	.name = "psp",
	.early_init = psp_early_init,
	.late_init = NULL,
	.sw_init = psp_sw_init,
	.sw_fini = psp_sw_fini,
	.hw_init = psp_hw_init,
	.hw_fini = psp_hw_fini,
	.suspend = psp_suspend,
	.resume = psp_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = psp_set_clockgating_state,
	.set_powergating_state = psp_set_powergating_state,
};

const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 11,
	.minor = 0,
	.rev = 8,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 13,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 13,
	.minor = 0,
	.rev = 4,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 14,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};
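/*
 * Sketch of how the exported IP block versions above are consumed elsewhere
 * in the driver (registration lives in the SoC/discovery code, not in this
 * file, and the exact call site depends on the ASIC), for example:
 *
 *   amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
 *
 * Each entry binds the shared psp_ip_funcs callbacks (early_init, hw_init,
 * suspend/resume, ...) to a specific PSP hardware revision.
 */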