1 /* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Author: Huang Rui 23 * 24 */ 25 26 #include <linux/firmware.h> 27 #include <drm/drm_drv.h> 28 29 #include "amdgpu.h" 30 #include "amdgpu_psp.h" 31 #include "amdgpu_ucode.h" 32 #include "amdgpu_xgmi.h" 33 #include "soc15_common.h" 34 #include "psp_v3_1.h" 35 #include "psp_v10_0.h" 36 #include "psp_v11_0.h" 37 #include "psp_v11_0_8.h" 38 #include "psp_v12_0.h" 39 #include "psp_v13_0.h" 40 #include "psp_v13_0_4.h" 41 #include "psp_v14_0.h" 42 43 #include "amdgpu_ras.h" 44 #include "amdgpu_securedisplay.h" 45 #include "amdgpu_atomfirmware.h" 46 47 #define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*3) 48 49 static int psp_load_smu_fw(struct psp_context *psp); 50 static int psp_rap_terminate(struct psp_context *psp); 51 static int psp_securedisplay_terminate(struct psp_context *psp); 52 53 static int psp_ring_init(struct psp_context *psp, 54 enum psp_ring_type ring_type) 55 { 56 int ret = 0; 57 struct psp_ring *ring; 58 struct amdgpu_device *adev = psp->adev; 59 60 ring = &psp->km_ring; 61 62 ring->ring_type = ring_type; 63 64 /* allocate 4k Page of Local Frame Buffer memory for ring */ 65 ring->ring_size = 0x1000; 66 ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE, 67 AMDGPU_GEM_DOMAIN_VRAM | 68 AMDGPU_GEM_DOMAIN_GTT, 69 &adev->firmware.rbuf, 70 &ring->ring_mem_mc_addr, 71 (void **)&ring->ring_mem); 72 if (ret) { 73 ring->ring_size = 0; 74 return ret; 75 } 76 77 return 0; 78 } 79 80 /* 81 * Due to DF Cstate management centralized to PMFW, the firmware 82 * loading sequence will be updated as below: 83 * - Load KDB 84 * - Load SYS_DRV 85 * - Load tOS 86 * - Load PMFW 87 * - Setup TMR 88 * - Load other non-psp fw 89 * - Load ASD 90 * - Load XGMI/RAS/HDCP/DTM TA if any 91 * 92 * This new sequence is required for 93 * - Arcturus and onwards 94 */ 95 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp) 96 { 97 struct amdgpu_device *adev = psp->adev; 98 99 if (amdgpu_sriov_vf(adev)) { 100 psp->pmfw_centralized_cstate_management = false; 101 return; 102 } 103 104 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 105 case IP_VERSION(11, 0, 0): 106 case IP_VERSION(11, 0, 4): 107 case IP_VERSION(11, 0, 5): 108 case IP_VERSION(11, 0, 7): 109 case IP_VERSION(11, 0, 9): 110 case IP_VERSION(11, 0, 11): 111 case IP_VERSION(11, 0, 12): 112 case IP_VERSION(11, 0, 13): 113 case IP_VERSION(13, 0, 0): 114 case IP_VERSION(13, 0, 2): 115 case IP_VERSION(13, 0, 7): 116 psp->pmfw_centralized_cstate_management = true; 117 break; 118 default: 119 psp->pmfw_centralized_cstate_management = false; 120 break; 121 } 122 } 123 124 static int psp_init_sriov_microcode(struct psp_context *psp) 125 { 126 struct amdgpu_device *adev = psp->adev; 127 char ucode_prefix[30]; 128 int ret = 0; 129 130 amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix)); 131 132 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 133 case IP_VERSION(9, 0, 0): 134 case IP_VERSION(11, 0, 7): 135 case IP_VERSION(11, 0, 9): 136 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2; 137 ret = psp_init_cap_microcode(psp, ucode_prefix); 138 break; 139 case IP_VERSION(13, 0, 2): 140 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2; 141 ret = psp_init_cap_microcode(psp, ucode_prefix); 142 ret &= psp_init_ta_microcode(psp, ucode_prefix); 143 break; 144 case IP_VERSION(13, 0, 0): 145 adev->virt.autoload_ucode_id = 0; 146 break; 147 case IP_VERSION(13, 0, 6): 148 case IP_VERSION(13, 0, 14): 149 ret = psp_init_cap_microcode(psp, ucode_prefix); 150 ret &= psp_init_ta_microcode(psp, ucode_prefix); 151 break; 152 case IP_VERSION(13, 0, 10): 153 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA; 154 ret = psp_init_cap_microcode(psp, ucode_prefix); 155 break; 156 default: 157 return -EINVAL; 158 } 159 return ret; 160 } 161 162 static int psp_early_init(struct amdgpu_ip_block *ip_block) 163 { 164 struct amdgpu_device *adev = ip_block->adev; 165 struct psp_context *psp = &adev->psp; 166 167 psp->autoload_supported = true; 168 psp->boot_time_tmr = true; 169 170 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 171 case IP_VERSION(9, 0, 0): 172 psp_v3_1_set_psp_funcs(psp); 173 psp->autoload_supported = false; 174 psp->boot_time_tmr = false; 175 break; 176 case IP_VERSION(10, 0, 0): 177 case IP_VERSION(10, 0, 1): 178 psp_v10_0_set_psp_funcs(psp); 179 psp->autoload_supported = false; 180 psp->boot_time_tmr = false; 181 break; 182 case IP_VERSION(11, 0, 2): 183 case IP_VERSION(11, 0, 4): 184 psp_v11_0_set_psp_funcs(psp); 185 psp->autoload_supported = false; 186 psp->boot_time_tmr = false; 187 break; 188 case IP_VERSION(11, 0, 0): 189 case IP_VERSION(11, 0, 7): 190 adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev); 191 fallthrough; 192 case IP_VERSION(11, 0, 5): 193 case IP_VERSION(11, 0, 9): 194 case IP_VERSION(11, 0, 11): 195 case IP_VERSION(11, 5, 0): 196 case IP_VERSION(11, 0, 12): 197 case IP_VERSION(11, 0, 13): 198 psp_v11_0_set_psp_funcs(psp); 199 psp->boot_time_tmr = false; 200 break; 201 case IP_VERSION(11, 0, 3): 202 case IP_VERSION(12, 0, 1): 203 psp_v12_0_set_psp_funcs(psp); 204 psp->autoload_supported = false; 205 psp->boot_time_tmr = false; 206 break; 207 case IP_VERSION(13, 0, 2): 208 psp->boot_time_tmr = false; 209 fallthrough; 210 case IP_VERSION(13, 0, 6): 211 case IP_VERSION(13, 0, 14): 212 psp_v13_0_set_psp_funcs(psp); 213 psp->autoload_supported = false; 214 break; 215 case IP_VERSION(13, 0, 1): 216 case IP_VERSION(13, 0, 3): 217 case IP_VERSION(13, 0, 5): 218 case IP_VERSION(13, 0, 8): 219 case IP_VERSION(13, 0, 11): 220 case IP_VERSION(14, 0, 0): 221 case IP_VERSION(14, 0, 1): 222 case IP_VERSION(14, 0, 4): 223 psp_v13_0_set_psp_funcs(psp); 224 psp->boot_time_tmr = false; 225 break; 226 case IP_VERSION(11, 0, 8): 227 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) { 228 psp_v11_0_8_set_psp_funcs(psp); 229 } 230 psp->autoload_supported = false; 231 psp->boot_time_tmr = false; 232 break; 233 case IP_VERSION(13, 0, 0): 234 case IP_VERSION(13, 0, 7): 235 case IP_VERSION(13, 0, 10): 236 psp_v13_0_set_psp_funcs(psp); 237 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev); 238 psp->boot_time_tmr = false; 239 break; 240 case IP_VERSION(13, 0, 4): 241 psp_v13_0_4_set_psp_funcs(psp); 242 psp->boot_time_tmr = false; 243 break; 244 case IP_VERSION(14, 0, 2): 245 case IP_VERSION(14, 0, 3): 246 psp_v14_0_set_psp_funcs(psp); 247 break; 248 default: 249 return -EINVAL; 250 } 251 252 psp->adev = adev; 253 254 adev->psp_timeout = 20000; 255 256 psp_check_pmfw_centralized_cstate_management(psp); 257 258 if (amdgpu_sriov_vf(adev)) 259 return psp_init_sriov_microcode(psp); 260 else 261 return psp_init_microcode(psp); 262 } 263 264 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx) 265 { 266 amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr, 267 &mem_ctx->shared_buf); 268 mem_ctx->shared_bo = NULL; 269 } 270 271 static void psp_free_shared_bufs(struct psp_context *psp) 272 { 273 void *tmr_buf; 274 void **pptr; 275 276 /* free TMR memory buffer */ 277 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; 278 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr); 279 psp->tmr_bo = NULL; 280 281 /* free xgmi shared memory */ 282 psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context); 283 284 /* free ras shared memory */ 285 psp_ta_free_shared_buf(&psp->ras_context.context.mem_context); 286 287 /* free hdcp shared memory */ 288 psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context); 289 290 /* free dtm shared memory */ 291 psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context); 292 293 /* free rap shared memory */ 294 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); 295 296 /* free securedisplay shared memory */ 297 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); 298 299 300 } 301 302 static void psp_memory_training_fini(struct psp_context *psp) 303 { 304 struct psp_memory_training_context *ctx = &psp->mem_train_ctx; 305 306 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT; 307 kfree(ctx->sys_cache); 308 ctx->sys_cache = NULL; 309 } 310 311 static int psp_memory_training_init(struct psp_context *psp) 312 { 313 int ret; 314 struct psp_memory_training_context *ctx = &psp->mem_train_ctx; 315 316 if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) { 317 dev_dbg(psp->adev->dev, "memory training is not supported!\n"); 318 return 0; 319 } 320 321 ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL); 322 if (ctx->sys_cache == NULL) { 323 dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n"); 324 ret = -ENOMEM; 325 goto Err_out; 326 } 327 328 dev_dbg(psp->adev->dev, 329 "train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", 330 ctx->train_data_size, 331 ctx->p2c_train_data_offset, 332 ctx->c2p_train_data_offset); 333 ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS; 334 return 0; 335 336 Err_out: 337 psp_memory_training_fini(psp); 338 return ret; 339 } 340 341 /* 342 * Helper funciton to query psp runtime database entry 343 * 344 * @adev: amdgpu_device pointer 345 * @entry_type: the type of psp runtime database entry 346 * @db_entry: runtime database entry pointer 347 * 348 * Return false if runtime database doesn't exit or entry is invalid 349 * or true if the specific database entry is found, and copy to @db_entry 350 */ 351 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev, 352 enum psp_runtime_entry_type entry_type, 353 void *db_entry) 354 { 355 uint64_t db_header_pos, db_dir_pos; 356 struct psp_runtime_data_header db_header = {0}; 357 struct psp_runtime_data_directory db_dir = {0}; 358 bool ret = false; 359 int i; 360 361 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || 362 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) 363 return false; 364 365 db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET; 366 db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header); 367 368 /* read runtime db header from vram */ 369 amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header, 370 sizeof(struct psp_runtime_data_header), false); 371 372 if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) { 373 /* runtime db doesn't exist, exit */ 374 dev_dbg(adev->dev, "PSP runtime database doesn't exist\n"); 375 return false; 376 } 377 378 /* read runtime database entry from vram */ 379 amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir, 380 sizeof(struct psp_runtime_data_directory), false); 381 382 if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) { 383 /* invalid db entry count, exit */ 384 dev_warn(adev->dev, "Invalid PSP runtime database entry count\n"); 385 return false; 386 } 387 388 /* look up for requested entry type */ 389 for (i = 0; i < db_dir.entry_count && !ret; i++) { 390 if (db_dir.entry_list[i].entry_type == entry_type) { 391 switch (entry_type) { 392 case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG: 393 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) { 394 /* invalid db entry size */ 395 dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n"); 396 return false; 397 } 398 /* read runtime database entry */ 399 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset, 400 (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false); 401 ret = true; 402 break; 403 case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS: 404 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) { 405 /* invalid db entry size */ 406 dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n"); 407 return false; 408 } 409 /* read runtime database entry */ 410 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset, 411 (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false); 412 ret = true; 413 break; 414 default: 415 ret = false; 416 break; 417 } 418 } 419 } 420 421 return ret; 422 } 423 424 static int psp_sw_init(struct amdgpu_ip_block *ip_block) 425 { 426 struct amdgpu_device *adev = ip_block->adev; 427 struct psp_context *psp = &adev->psp; 428 int ret; 429 struct psp_runtime_boot_cfg_entry boot_cfg_entry; 430 struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx; 431 struct psp_runtime_scpm_entry scpm_entry; 432 433 psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 434 if (!psp->cmd) { 435 dev_err(adev->dev, "Failed to allocate memory to command buffer!\n"); 436 ret = -ENOMEM; 437 } 438 439 adev->psp.xgmi_context.supports_extended_data = 440 !adev->gmc.xgmi.connected_to_cpu && 441 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2); 442 443 memset(&scpm_entry, 0, sizeof(scpm_entry)); 444 if ((psp_get_runtime_db_entry(adev, 445 PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS, 446 &scpm_entry)) && 447 (scpm_entry.scpm_status != SCPM_DISABLE)) { 448 adev->scpm_enabled = true; 449 adev->scpm_status = scpm_entry.scpm_status; 450 } else { 451 adev->scpm_enabled = false; 452 adev->scpm_status = SCPM_DISABLE; 453 } 454 455 /* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */ 456 457 memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry)); 458 if (psp_get_runtime_db_entry(adev, 459 PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG, 460 &boot_cfg_entry)) { 461 psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask; 462 if ((psp->boot_cfg_bitmask) & 463 BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) { 464 /* If psp runtime database exists, then 465 * only enable two stage memory training 466 * when TWO_STAGE_DRAM_TRAINING bit is set 467 * in runtime database 468 */ 469 mem_training_ctx->enable_mem_training = true; 470 } 471 472 } else { 473 /* If psp runtime database doesn't exist or is 474 * invalid, force enable two stage memory training 475 */ 476 mem_training_ctx->enable_mem_training = true; 477 } 478 479 if (mem_training_ctx->enable_mem_training) { 480 ret = psp_memory_training_init(psp); 481 if (ret) { 482 dev_err(adev->dev, "Failed to initialize memory training!\n"); 483 return ret; 484 } 485 486 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); 487 if (ret) { 488 dev_err(adev->dev, "Failed to process memory training!\n"); 489 return ret; 490 } 491 } 492 493 ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, 494 (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ? 495 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, 496 &psp->fw_pri_bo, 497 &psp->fw_pri_mc_addr, 498 &psp->fw_pri_buf); 499 if (ret) 500 return ret; 501 502 ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE, 503 AMDGPU_GEM_DOMAIN_VRAM | 504 AMDGPU_GEM_DOMAIN_GTT, 505 &psp->fence_buf_bo, 506 &psp->fence_buf_mc_addr, 507 &psp->fence_buf); 508 if (ret) 509 goto failed1; 510 511 ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE, 512 AMDGPU_GEM_DOMAIN_VRAM | 513 AMDGPU_GEM_DOMAIN_GTT, 514 &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, 515 (void **)&psp->cmd_buf_mem); 516 if (ret) 517 goto failed2; 518 519 return 0; 520 521 failed2: 522 amdgpu_bo_free_kernel(&psp->fence_buf_bo, 523 &psp->fence_buf_mc_addr, &psp->fence_buf); 524 failed1: 525 amdgpu_bo_free_kernel(&psp->fw_pri_bo, 526 &psp->fw_pri_mc_addr, &psp->fw_pri_buf); 527 return ret; 528 } 529 530 static int psp_sw_fini(struct amdgpu_ip_block *ip_block) 531 { 532 struct amdgpu_device *adev = ip_block->adev; 533 struct psp_context *psp = &adev->psp; 534 struct psp_gfx_cmd_resp *cmd = psp->cmd; 535 536 psp_memory_training_fini(psp); 537 538 amdgpu_ucode_release(&psp->sos_fw); 539 amdgpu_ucode_release(&psp->asd_fw); 540 amdgpu_ucode_release(&psp->ta_fw); 541 amdgpu_ucode_release(&psp->cap_fw); 542 amdgpu_ucode_release(&psp->toc_fw); 543 544 kfree(cmd); 545 cmd = NULL; 546 547 psp_free_shared_bufs(psp); 548 549 if (psp->km_ring.ring_mem) 550 amdgpu_bo_free_kernel(&adev->firmware.rbuf, 551 &psp->km_ring.ring_mem_mc_addr, 552 (void **)&psp->km_ring.ring_mem); 553 554 amdgpu_bo_free_kernel(&psp->fw_pri_bo, 555 &psp->fw_pri_mc_addr, &psp->fw_pri_buf); 556 amdgpu_bo_free_kernel(&psp->fence_buf_bo, 557 &psp->fence_buf_mc_addr, &psp->fence_buf); 558 amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, 559 (void **)&psp->cmd_buf_mem); 560 561 return 0; 562 } 563 564 int psp_wait_for(struct psp_context *psp, uint32_t reg_index, 565 uint32_t reg_val, uint32_t mask, bool check_changed) 566 { 567 uint32_t val; 568 int i; 569 struct amdgpu_device *adev = psp->adev; 570 571 if (psp->adev->no_hw_access) 572 return 0; 573 574 for (i = 0; i < adev->usec_timeout; i++) { 575 val = RREG32(reg_index); 576 if (check_changed) { 577 if (val != reg_val) 578 return 0; 579 } else { 580 if ((val & mask) == reg_val) 581 return 0; 582 } 583 udelay(1); 584 } 585 586 return -ETIME; 587 } 588 589 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index, 590 uint32_t reg_val, uint32_t mask, uint32_t msec_timeout) 591 { 592 uint32_t val; 593 int i; 594 struct amdgpu_device *adev = psp->adev; 595 596 if (psp->adev->no_hw_access) 597 return 0; 598 599 for (i = 0; i < msec_timeout; i++) { 600 val = RREG32(reg_index); 601 if ((val & mask) == reg_val) 602 return 0; 603 msleep(1); 604 } 605 606 return -ETIME; 607 } 608 609 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id) 610 { 611 switch (cmd_id) { 612 case GFX_CMD_ID_LOAD_TA: 613 return "LOAD_TA"; 614 case GFX_CMD_ID_UNLOAD_TA: 615 return "UNLOAD_TA"; 616 case GFX_CMD_ID_INVOKE_CMD: 617 return "INVOKE_CMD"; 618 case GFX_CMD_ID_LOAD_ASD: 619 return "LOAD_ASD"; 620 case GFX_CMD_ID_SETUP_TMR: 621 return "SETUP_TMR"; 622 case GFX_CMD_ID_LOAD_IP_FW: 623 return "LOAD_IP_FW"; 624 case GFX_CMD_ID_DESTROY_TMR: 625 return "DESTROY_TMR"; 626 case GFX_CMD_ID_SAVE_RESTORE: 627 return "SAVE_RESTORE_IP_FW"; 628 case GFX_CMD_ID_SETUP_VMR: 629 return "SETUP_VMR"; 630 case GFX_CMD_ID_DESTROY_VMR: 631 return "DESTROY_VMR"; 632 case GFX_CMD_ID_PROG_REG: 633 return "PROG_REG"; 634 case GFX_CMD_ID_GET_FW_ATTESTATION: 635 return "GET_FW_ATTESTATION"; 636 case GFX_CMD_ID_LOAD_TOC: 637 return "ID_LOAD_TOC"; 638 case GFX_CMD_ID_AUTOLOAD_RLC: 639 return "AUTOLOAD_RLC"; 640 case GFX_CMD_ID_BOOT_CFG: 641 return "BOOT_CFG"; 642 case GFX_CMD_ID_CONFIG_SQ_PERFMON: 643 return "CONFIG_SQ_PERFMON"; 644 default: 645 return "UNKNOWN CMD"; 646 } 647 } 648 649 static bool psp_err_warn(struct psp_context *psp) 650 { 651 struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem; 652 653 /* This response indicates reg list is already loaded */ 654 if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) && 655 cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW && 656 cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST && 657 cmd->resp.status == TEE_ERROR_CANCEL) 658 return false; 659 660 return true; 661 } 662 663 static int 664 psp_cmd_submit_buf(struct psp_context *psp, 665 struct amdgpu_firmware_info *ucode, 666 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr) 667 { 668 int ret; 669 int index; 670 int timeout = psp->adev->psp_timeout; 671 bool ras_intr = false; 672 bool skip_unsupport = false; 673 674 if (psp->adev->no_hw_access) 675 return 0; 676 677 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); 678 679 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); 680 681 index = atomic_inc_return(&psp->fence_value); 682 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index); 683 if (ret) { 684 atomic_dec(&psp->fence_value); 685 goto exit; 686 } 687 688 amdgpu_device_invalidate_hdp(psp->adev, NULL); 689 while (*((unsigned int *)psp->fence_buf) != index) { 690 if (--timeout == 0) 691 break; 692 /* 693 * Shouldn't wait for timeout when err_event_athub occurs, 694 * because gpu reset thread triggered and lock resource should 695 * be released for psp resume sequence. 696 */ 697 ras_intr = amdgpu_ras_intr_triggered(); 698 if (ras_intr) 699 break; 700 usleep_range(10, 100); 701 amdgpu_device_invalidate_hdp(psp->adev, NULL); 702 } 703 704 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */ 705 skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED || 706 psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev); 707 708 memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp)); 709 710 /* In some cases, psp response status is not 0 even there is no 711 * problem while the command is submitted. Some version of PSP FW 712 * doesn't write 0 to that field. 713 * So here we would like to only print a warning instead of an error 714 * during psp initialization to avoid breaking hw_init and it doesn't 715 * return -EINVAL. 716 */ 717 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) { 718 if (ucode) 719 dev_warn(psp->adev->dev, 720 "failed to load ucode %s(0x%X) ", 721 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id); 722 if (psp_err_warn(psp)) 723 dev_warn( 724 psp->adev->dev, 725 "psp gfx command %s(0x%X) failed and response status is (0x%X)\n", 726 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), 727 psp->cmd_buf_mem->cmd_id, 728 psp->cmd_buf_mem->resp.status); 729 /* If any firmware (including CAP) load fails under SRIOV, it should 730 * return failure to stop the VF from initializing. 731 * Also return failure in case of timeout 732 */ 733 if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) { 734 ret = -EINVAL; 735 goto exit; 736 } 737 } 738 739 if (ucode) { 740 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo; 741 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi; 742 } 743 744 exit: 745 return ret; 746 } 747 748 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp) 749 { 750 struct psp_gfx_cmd_resp *cmd = psp->cmd; 751 752 mutex_lock(&psp->mutex); 753 754 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); 755 756 return cmd; 757 } 758 759 static void release_psp_cmd_buf(struct psp_context *psp) 760 { 761 mutex_unlock(&psp->mutex); 762 } 763 764 static void psp_prep_tmr_cmd_buf(struct psp_context *psp, 765 struct psp_gfx_cmd_resp *cmd, 766 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo) 767 { 768 struct amdgpu_device *adev = psp->adev; 769 uint32_t size = 0; 770 uint64_t tmr_pa = 0; 771 772 if (tmr_bo) { 773 size = amdgpu_bo_size(tmr_bo); 774 tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo); 775 } 776 777 if (amdgpu_sriov_vf(psp->adev)) 778 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR; 779 else 780 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR; 781 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc); 782 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc); 783 cmd->cmd.cmd_setup_tmr.buf_size = size; 784 cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1; 785 cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa); 786 cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa); 787 } 788 789 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd, 790 uint64_t pri_buf_mc, uint32_t size) 791 { 792 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC; 793 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc); 794 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc); 795 cmd->cmd.cmd_load_toc.toc_size = size; 796 } 797 798 /* Issue LOAD TOC cmd to PSP to part toc and calculate tmr size needed */ 799 static int psp_load_toc(struct psp_context *psp, 800 uint32_t *tmr_size) 801 { 802 int ret; 803 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 804 805 /* Copy toc to psp firmware private buffer */ 806 psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes); 807 808 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes); 809 810 ret = psp_cmd_submit_buf(psp, NULL, cmd, 811 psp->fence_buf_mc_addr); 812 if (!ret) 813 *tmr_size = psp->cmd_buf_mem->resp.tmr_size; 814 815 release_psp_cmd_buf(psp); 816 817 return ret; 818 } 819 820 /* Set up Trusted Memory Region */ 821 static int psp_tmr_init(struct psp_context *psp) 822 { 823 int ret = 0; 824 int tmr_size; 825 void *tmr_buf; 826 void **pptr; 827 828 /* 829 * According to HW engineer, they prefer the TMR address be "naturally 830 * aligned" , e.g. the start address be an integer divide of TMR size. 831 * 832 * Note: this memory need be reserved till the driver 833 * uninitializes. 834 */ 835 tmr_size = PSP_TMR_SIZE(psp->adev); 836 837 /* For ASICs support RLC autoload, psp will parse the toc 838 * and calculate the total size of TMR needed 839 */ 840 if (!amdgpu_sriov_vf(psp->adev) && 841 psp->toc.start_addr && 842 psp->toc.size_bytes && 843 psp->fw_pri_buf) { 844 ret = psp_load_toc(psp, &tmr_size); 845 if (ret) { 846 dev_err(psp->adev->dev, "Failed to load toc\n"); 847 return ret; 848 } 849 } 850 851 if (!psp->tmr_bo && !psp->boot_time_tmr) { 852 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; 853 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, 854 PSP_TMR_ALIGNMENT, 855 AMDGPU_HAS_VRAM(psp->adev) ? 856 AMDGPU_GEM_DOMAIN_VRAM : 857 AMDGPU_GEM_DOMAIN_GTT, 858 &psp->tmr_bo, &psp->tmr_mc_addr, 859 pptr); 860 } 861 862 return ret; 863 } 864 865 static bool psp_skip_tmr(struct psp_context *psp) 866 { 867 switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) { 868 case IP_VERSION(11, 0, 9): 869 case IP_VERSION(11, 0, 7): 870 case IP_VERSION(13, 0, 2): 871 case IP_VERSION(13, 0, 6): 872 case IP_VERSION(13, 0, 10): 873 case IP_VERSION(13, 0, 14): 874 return true; 875 default: 876 return false; 877 } 878 } 879 880 static int psp_tmr_load(struct psp_context *psp) 881 { 882 int ret; 883 struct psp_gfx_cmd_resp *cmd; 884 885 /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR. 886 * Already set up by host driver. 887 */ 888 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp)) 889 return 0; 890 891 cmd = acquire_psp_cmd_buf(psp); 892 893 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo); 894 if (psp->tmr_bo) 895 dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n", 896 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr); 897 898 ret = psp_cmd_submit_buf(psp, NULL, cmd, 899 psp->fence_buf_mc_addr); 900 901 release_psp_cmd_buf(psp); 902 903 return ret; 904 } 905 906 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp, 907 struct psp_gfx_cmd_resp *cmd) 908 { 909 if (amdgpu_sriov_vf(psp->adev)) 910 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR; 911 else 912 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR; 913 } 914 915 static int psp_tmr_unload(struct psp_context *psp) 916 { 917 int ret; 918 struct psp_gfx_cmd_resp *cmd; 919 920 /* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV, 921 * as TMR is not loaded at all 922 */ 923 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp)) 924 return 0; 925 926 cmd = acquire_psp_cmd_buf(psp); 927 928 psp_prep_tmr_unload_cmd_buf(psp, cmd); 929 dev_dbg(psp->adev->dev, "free PSP TMR buffer\n"); 930 931 ret = psp_cmd_submit_buf(psp, NULL, cmd, 932 psp->fence_buf_mc_addr); 933 934 release_psp_cmd_buf(psp); 935 936 return ret; 937 } 938 939 static int psp_tmr_terminate(struct psp_context *psp) 940 { 941 return psp_tmr_unload(psp); 942 } 943 944 int psp_get_fw_attestation_records_addr(struct psp_context *psp, 945 uint64_t *output_ptr) 946 { 947 int ret; 948 struct psp_gfx_cmd_resp *cmd; 949 950 if (!output_ptr) 951 return -EINVAL; 952 953 if (amdgpu_sriov_vf(psp->adev)) 954 return 0; 955 956 cmd = acquire_psp_cmd_buf(psp); 957 958 cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION; 959 960 ret = psp_cmd_submit_buf(psp, NULL, cmd, 961 psp->fence_buf_mc_addr); 962 963 if (!ret) { 964 *output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) + 965 ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32); 966 } 967 968 release_psp_cmd_buf(psp); 969 970 return ret; 971 } 972 973 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg) 974 { 975 struct psp_context *psp = &adev->psp; 976 struct psp_gfx_cmd_resp *cmd; 977 int ret; 978 979 if (amdgpu_sriov_vf(adev)) 980 return 0; 981 982 cmd = acquire_psp_cmd_buf(psp); 983 984 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG; 985 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET; 986 987 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 988 if (!ret) { 989 *boot_cfg = 990 (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0; 991 } 992 993 release_psp_cmd_buf(psp); 994 995 return ret; 996 } 997 998 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg) 999 { 1000 int ret; 1001 struct psp_context *psp = &adev->psp; 1002 struct psp_gfx_cmd_resp *cmd; 1003 1004 if (amdgpu_sriov_vf(adev)) 1005 return 0; 1006 1007 cmd = acquire_psp_cmd_buf(psp); 1008 1009 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG; 1010 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET; 1011 cmd->cmd.boot_cfg.boot_config = boot_cfg; 1012 cmd->cmd.boot_cfg.boot_config_valid = boot_cfg; 1013 1014 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1015 1016 release_psp_cmd_buf(psp); 1017 1018 return ret; 1019 } 1020 1021 static int psp_rl_load(struct amdgpu_device *adev) 1022 { 1023 int ret; 1024 struct psp_context *psp = &adev->psp; 1025 struct psp_gfx_cmd_resp *cmd; 1026 1027 if (!is_psp_fw_valid(psp->rl)) 1028 return 0; 1029 1030 cmd = acquire_psp_cmd_buf(psp); 1031 1032 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 1033 memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes); 1034 1035 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 1036 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr); 1037 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr); 1038 cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes; 1039 cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST; 1040 1041 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1042 1043 release_psp_cmd_buf(psp); 1044 1045 return ret; 1046 } 1047 1048 int psp_memory_partition(struct psp_context *psp, int mode) 1049 { 1050 struct psp_gfx_cmd_resp *cmd; 1051 int ret; 1052 1053 if (amdgpu_sriov_vf(psp->adev)) 1054 return 0; 1055 1056 cmd = acquire_psp_cmd_buf(psp); 1057 1058 cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE; 1059 cmd->cmd.cmd_memory_part.mode = mode; 1060 1061 dev_info(psp->adev->dev, 1062 "Requesting %d memory partition change through PSP", mode); 1063 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1064 if (ret) 1065 dev_err(psp->adev->dev, 1066 "PSP request failed to change to NPS%d mode\n", mode); 1067 1068 release_psp_cmd_buf(psp); 1069 1070 return ret; 1071 } 1072 1073 int psp_spatial_partition(struct psp_context *psp, int mode) 1074 { 1075 struct psp_gfx_cmd_resp *cmd; 1076 int ret; 1077 1078 if (amdgpu_sriov_vf(psp->adev)) 1079 return 0; 1080 1081 cmd = acquire_psp_cmd_buf(psp); 1082 1083 cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART; 1084 cmd->cmd.cmd_spatial_part.mode = mode; 1085 1086 dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode); 1087 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1088 1089 release_psp_cmd_buf(psp); 1090 1091 return ret; 1092 } 1093 1094 static int psp_asd_initialize(struct psp_context *psp) 1095 { 1096 int ret; 1097 1098 /* If PSP version doesn't match ASD version, asd loading will be failed. 1099 * add workaround to bypass it for sriov now. 1100 * TODO: add version check to make it common 1101 */ 1102 if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes) 1103 return 0; 1104 1105 /* bypass asd if display hardware is not available */ 1106 if (!amdgpu_device_has_display_hardware(psp->adev) && 1107 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10)) 1108 return 0; 1109 1110 psp->asd_context.mem_context.shared_mc_addr = 0; 1111 psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE; 1112 psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD; 1113 1114 ret = psp_ta_load(psp, &psp->asd_context); 1115 if (!ret) 1116 psp->asd_context.initialized = true; 1117 1118 return ret; 1119 } 1120 1121 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1122 uint32_t session_id) 1123 { 1124 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; 1125 cmd->cmd.cmd_unload_ta.session_id = session_id; 1126 } 1127 1128 int psp_ta_unload(struct psp_context *psp, struct ta_context *context) 1129 { 1130 int ret; 1131 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 1132 1133 psp_prep_ta_unload_cmd_buf(cmd, context->session_id); 1134 1135 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1136 1137 context->resp_status = cmd->resp.status; 1138 1139 release_psp_cmd_buf(psp); 1140 1141 return ret; 1142 } 1143 1144 static int psp_asd_terminate(struct psp_context *psp) 1145 { 1146 int ret; 1147 1148 if (amdgpu_sriov_vf(psp->adev)) 1149 return 0; 1150 1151 if (!psp->asd_context.initialized) 1152 return 0; 1153 1154 ret = psp_ta_unload(psp, &psp->asd_context); 1155 if (!ret) 1156 psp->asd_context.initialized = false; 1157 1158 return ret; 1159 } 1160 1161 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1162 uint32_t id, uint32_t value) 1163 { 1164 cmd->cmd_id = GFX_CMD_ID_PROG_REG; 1165 cmd->cmd.cmd_setup_reg_prog.reg_value = value; 1166 cmd->cmd.cmd_setup_reg_prog.reg_id = id; 1167 } 1168 1169 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg, 1170 uint32_t value) 1171 { 1172 struct psp_gfx_cmd_resp *cmd; 1173 int ret = 0; 1174 1175 if (reg >= PSP_REG_LAST) 1176 return -EINVAL; 1177 1178 cmd = acquire_psp_cmd_buf(psp); 1179 1180 psp_prep_reg_prog_cmd_buf(cmd, reg, value); 1181 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1182 if (ret) 1183 dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg); 1184 1185 release_psp_cmd_buf(psp); 1186 1187 return ret; 1188 } 1189 1190 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1191 uint64_t ta_bin_mc, 1192 struct ta_context *context) 1193 { 1194 cmd->cmd_id = context->ta_load_type; 1195 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc); 1196 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc); 1197 cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes; 1198 1199 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 1200 lower_32_bits(context->mem_context.shared_mc_addr); 1201 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 1202 upper_32_bits(context->mem_context.shared_mc_addr); 1203 cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size; 1204 } 1205 1206 int psp_ta_init_shared_buf(struct psp_context *psp, 1207 struct ta_mem_context *mem_ctx) 1208 { 1209 /* 1210 * Allocate 16k memory aligned to 4k from Frame Buffer (local 1211 * physical) for ta to host memory 1212 */ 1213 return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size, 1214 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | 1215 AMDGPU_GEM_DOMAIN_GTT, 1216 &mem_ctx->shared_bo, 1217 &mem_ctx->shared_mc_addr, 1218 &mem_ctx->shared_buf); 1219 } 1220 1221 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1222 uint32_t ta_cmd_id, 1223 uint32_t session_id) 1224 { 1225 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; 1226 cmd->cmd.cmd_invoke_cmd.session_id = session_id; 1227 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; 1228 } 1229 1230 int psp_ta_invoke(struct psp_context *psp, 1231 uint32_t ta_cmd_id, 1232 struct ta_context *context) 1233 { 1234 int ret; 1235 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 1236 1237 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id); 1238 1239 ret = psp_cmd_submit_buf(psp, NULL, cmd, 1240 psp->fence_buf_mc_addr); 1241 1242 context->resp_status = cmd->resp.status; 1243 1244 release_psp_cmd_buf(psp); 1245 1246 return ret; 1247 } 1248 1249 int psp_ta_load(struct psp_context *psp, struct ta_context *context) 1250 { 1251 int ret; 1252 struct psp_gfx_cmd_resp *cmd; 1253 1254 cmd = acquire_psp_cmd_buf(psp); 1255 1256 psp_copy_fw(psp, context->bin_desc.start_addr, 1257 context->bin_desc.size_bytes); 1258 1259 psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context); 1260 1261 ret = psp_cmd_submit_buf(psp, NULL, cmd, 1262 psp->fence_buf_mc_addr); 1263 1264 context->resp_status = cmd->resp.status; 1265 1266 if (!ret) 1267 context->session_id = cmd->resp.session_id; 1268 1269 release_psp_cmd_buf(psp); 1270 1271 return ret; 1272 } 1273 1274 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1275 { 1276 return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context); 1277 } 1278 1279 int psp_xgmi_terminate(struct psp_context *psp) 1280 { 1281 int ret; 1282 struct amdgpu_device *adev = psp->adev; 1283 1284 /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */ 1285 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) || 1286 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) && 1287 adev->gmc.xgmi.connected_to_cpu)) 1288 return 0; 1289 1290 if (!psp->xgmi_context.context.initialized) 1291 return 0; 1292 1293 ret = psp_ta_unload(psp, &psp->xgmi_context.context); 1294 1295 psp->xgmi_context.context.initialized = false; 1296 1297 return ret; 1298 } 1299 1300 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta) 1301 { 1302 struct ta_xgmi_shared_memory *xgmi_cmd; 1303 int ret; 1304 1305 if (!psp->ta_fw || 1306 !psp->xgmi_context.context.bin_desc.size_bytes || 1307 !psp->xgmi_context.context.bin_desc.start_addr) 1308 return -ENOENT; 1309 1310 if (!load_ta) 1311 goto invoke; 1312 1313 psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE; 1314 psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1315 1316 if (!psp->xgmi_context.context.mem_context.shared_buf) { 1317 ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context); 1318 if (ret) 1319 return ret; 1320 } 1321 1322 /* Load XGMI TA */ 1323 ret = psp_ta_load(psp, &psp->xgmi_context.context); 1324 if (!ret) 1325 psp->xgmi_context.context.initialized = true; 1326 else 1327 return ret; 1328 1329 invoke: 1330 /* Initialize XGMI session */ 1331 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf); 1332 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1333 xgmi_cmd->flag_extend_link_record = set_extended_data; 1334 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE; 1335 1336 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1337 /* note down the capbility flag for XGMI TA */ 1338 psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag; 1339 1340 return ret; 1341 } 1342 1343 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id) 1344 { 1345 struct ta_xgmi_shared_memory *xgmi_cmd; 1346 int ret; 1347 1348 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1349 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1350 1351 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID; 1352 1353 /* Invoke xgmi ta to get hive id */ 1354 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1355 if (ret) 1356 return ret; 1357 1358 *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id; 1359 1360 return 0; 1361 } 1362 1363 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id) 1364 { 1365 struct ta_xgmi_shared_memory *xgmi_cmd; 1366 int ret; 1367 1368 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1369 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1370 1371 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID; 1372 1373 /* Invoke xgmi ta to get the node id */ 1374 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1375 if (ret) 1376 return ret; 1377 1378 *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id; 1379 1380 return 0; 1381 } 1382 1383 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp) 1384 { 1385 return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == 1386 IP_VERSION(13, 0, 2) && 1387 psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) || 1388 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= 1389 IP_VERSION(13, 0, 6); 1390 } 1391 1392 /* 1393 * Chips that support extended topology information require the driver to 1394 * reflect topology information in the opposite direction. This is 1395 * because the TA has already exceeded its link record limit and if the 1396 * TA holds bi-directional information, the driver would have to do 1397 * multiple fetches instead of just two. 1398 */ 1399 static void psp_xgmi_reflect_topology_info(struct psp_context *psp, 1400 struct psp_xgmi_node_info node_info) 1401 { 1402 struct amdgpu_device *mirror_adev; 1403 struct amdgpu_hive_info *hive; 1404 uint64_t src_node_id = psp->adev->gmc.xgmi.node_id; 1405 uint64_t dst_node_id = node_info.node_id; 1406 uint8_t dst_num_hops = node_info.num_hops; 1407 uint8_t dst_num_links = node_info.num_links; 1408 1409 hive = amdgpu_get_xgmi_hive(psp->adev); 1410 if (WARN_ON(!hive)) 1411 return; 1412 1413 list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) { 1414 struct psp_xgmi_topology_info *mirror_top_info; 1415 int j; 1416 1417 if (mirror_adev->gmc.xgmi.node_id != dst_node_id) 1418 continue; 1419 1420 mirror_top_info = &mirror_adev->psp.xgmi_context.top_info; 1421 for (j = 0; j < mirror_top_info->num_nodes; j++) { 1422 if (mirror_top_info->nodes[j].node_id != src_node_id) 1423 continue; 1424 1425 mirror_top_info->nodes[j].num_hops = dst_num_hops; 1426 /* 1427 * prevent 0 num_links value re-reflection since reflection 1428 * criteria is based on num_hops (direct or indirect). 1429 * 1430 */ 1431 if (dst_num_links) 1432 mirror_top_info->nodes[j].num_links = dst_num_links; 1433 1434 break; 1435 } 1436 1437 break; 1438 } 1439 1440 amdgpu_put_xgmi_hive(hive); 1441 } 1442 1443 int psp_xgmi_get_topology_info(struct psp_context *psp, 1444 int number_devices, 1445 struct psp_xgmi_topology_info *topology, 1446 bool get_extended_data) 1447 { 1448 struct ta_xgmi_shared_memory *xgmi_cmd; 1449 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; 1450 struct ta_xgmi_cmd_get_topology_info_output *topology_info_output; 1451 int i; 1452 int ret; 1453 1454 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) 1455 return -EINVAL; 1456 1457 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1458 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1459 xgmi_cmd->flag_extend_link_record = get_extended_data; 1460 1461 /* Fill in the shared memory with topology information as input */ 1462 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; 1463 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO; 1464 topology_info_input->num_nodes = number_devices; 1465 1466 for (i = 0; i < topology_info_input->num_nodes; i++) { 1467 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; 1468 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; 1469 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled; 1470 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; 1471 } 1472 1473 /* Invoke xgmi ta to get the topology information */ 1474 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO); 1475 if (ret) 1476 return ret; 1477 1478 /* Read the output topology information from the shared memory */ 1479 topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info; 1480 topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes; 1481 for (i = 0; i < topology->num_nodes; i++) { 1482 /* extended data will either be 0 or equal to non-extended data */ 1483 if (topology_info_output->nodes[i].num_hops) 1484 topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops; 1485 1486 /* non-extended data gets everything here so no need to update */ 1487 if (!get_extended_data) { 1488 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id; 1489 topology->nodes[i].is_sharing_enabled = 1490 topology_info_output->nodes[i].is_sharing_enabled; 1491 topology->nodes[i].sdma_engine = 1492 topology_info_output->nodes[i].sdma_engine; 1493 } 1494 1495 } 1496 1497 /* Invoke xgmi ta again to get the link information */ 1498 if (psp_xgmi_peer_link_info_supported(psp)) { 1499 struct ta_xgmi_cmd_get_peer_link_info *link_info_output; 1500 struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output; 1501 bool requires_reflection = 1502 (psp->xgmi_context.supports_extended_data && 1503 get_extended_data) || 1504 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == 1505 IP_VERSION(13, 0, 6) || 1506 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == 1507 IP_VERSION(13, 0, 14); 1508 bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 : 1509 psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG; 1510 1511 /* popluate the shared output buffer rather than the cmd input buffer 1512 * with node_ids as the input for GET_PEER_LINKS command execution. 1513 * This is required for GET_PEER_LINKS per xgmi ta implementation. 1514 * The same requirement for GET_EXTEND_PEER_LINKS command. 1515 */ 1516 if (ta_port_num_support) { 1517 link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info; 1518 1519 for (i = 0; i < topology->num_nodes; i++) 1520 link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id; 1521 1522 link_extend_info_output->num_nodes = topology->num_nodes; 1523 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS; 1524 } else { 1525 link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info; 1526 1527 for (i = 0; i < topology->num_nodes; i++) 1528 link_info_output->nodes[i].node_id = topology->nodes[i].node_id; 1529 1530 link_info_output->num_nodes = topology->num_nodes; 1531 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS; 1532 } 1533 1534 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1535 if (ret) 1536 return ret; 1537 1538 for (i = 0; i < topology->num_nodes; i++) { 1539 uint8_t node_num_links = ta_port_num_support ? 1540 link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links; 1541 /* accumulate num_links on extended data */ 1542 if (get_extended_data) { 1543 topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links; 1544 } else { 1545 topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ? 1546 topology->nodes[i].num_links : node_num_links; 1547 } 1548 /* popluate the connected port num info if supported and available */ 1549 if (ta_port_num_support && topology->nodes[i].num_links) { 1550 memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num, 1551 sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM); 1552 } 1553 1554 /* reflect the topology information for bi-directionality */ 1555 if (requires_reflection && topology->nodes[i].num_hops) 1556 psp_xgmi_reflect_topology_info(psp, topology->nodes[i]); 1557 } 1558 } 1559 1560 return 0; 1561 } 1562 1563 int psp_xgmi_set_topology_info(struct psp_context *psp, 1564 int number_devices, 1565 struct psp_xgmi_topology_info *topology) 1566 { 1567 struct ta_xgmi_shared_memory *xgmi_cmd; 1568 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; 1569 int i; 1570 1571 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) 1572 return -EINVAL; 1573 1574 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1575 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1576 1577 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; 1578 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO; 1579 topology_info_input->num_nodes = number_devices; 1580 1581 for (i = 0; i < topology_info_input->num_nodes; i++) { 1582 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; 1583 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; 1584 topology_info_input->nodes[i].is_sharing_enabled = 1; 1585 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; 1586 } 1587 1588 /* Invoke xgmi ta to set topology information */ 1589 return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO); 1590 } 1591 1592 // ras begin 1593 static void psp_ras_ta_check_status(struct psp_context *psp) 1594 { 1595 struct ta_ras_shared_memory *ras_cmd = 1596 (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1597 1598 switch (ras_cmd->ras_status) { 1599 case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP: 1600 dev_warn(psp->adev->dev, 1601 "RAS WARNING: cmd failed due to unsupported ip\n"); 1602 break; 1603 case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ: 1604 dev_warn(psp->adev->dev, 1605 "RAS WARNING: cmd failed due to unsupported error injection\n"); 1606 break; 1607 case TA_RAS_STATUS__SUCCESS: 1608 break; 1609 case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED: 1610 if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR) 1611 dev_warn(psp->adev->dev, 1612 "RAS WARNING: Inject error to critical region is not allowed\n"); 1613 break; 1614 default: 1615 dev_warn(psp->adev->dev, 1616 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status); 1617 break; 1618 } 1619 } 1620 1621 static int psp_ras_send_cmd(struct psp_context *psp, 1622 enum ras_command cmd_id, void *in, void *out) 1623 { 1624 struct ta_ras_shared_memory *ras_cmd; 1625 uint32_t cmd = cmd_id; 1626 int ret = 0; 1627 1628 if (!in) 1629 return -EINVAL; 1630 1631 mutex_lock(&psp->ras_context.mutex); 1632 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1633 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1634 1635 switch (cmd) { 1636 case TA_RAS_COMMAND__ENABLE_FEATURES: 1637 case TA_RAS_COMMAND__DISABLE_FEATURES: 1638 memcpy(&ras_cmd->ras_in_message, 1639 in, sizeof(ras_cmd->ras_in_message)); 1640 break; 1641 case TA_RAS_COMMAND__TRIGGER_ERROR: 1642 memcpy(&ras_cmd->ras_in_message.trigger_error, 1643 in, sizeof(ras_cmd->ras_in_message.trigger_error)); 1644 break; 1645 case TA_RAS_COMMAND__QUERY_ADDRESS: 1646 memcpy(&ras_cmd->ras_in_message.address, 1647 in, sizeof(ras_cmd->ras_in_message.address)); 1648 break; 1649 default: 1650 dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd); 1651 ret = -EINVAL; 1652 goto err_out; 1653 } 1654 1655 ras_cmd->cmd_id = cmd; 1656 ret = psp_ras_invoke(psp, ras_cmd->cmd_id); 1657 1658 switch (cmd) { 1659 case TA_RAS_COMMAND__TRIGGER_ERROR: 1660 if (!ret && out) 1661 memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status)); 1662 break; 1663 case TA_RAS_COMMAND__QUERY_ADDRESS: 1664 if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status) 1665 ret = -EINVAL; 1666 else if (out) 1667 memcpy(out, 1668 &ras_cmd->ras_out_message.address, 1669 sizeof(ras_cmd->ras_out_message.address)); 1670 break; 1671 default: 1672 break; 1673 } 1674 1675 err_out: 1676 mutex_unlock(&psp->ras_context.mutex); 1677 1678 return ret; 1679 } 1680 1681 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1682 { 1683 struct ta_ras_shared_memory *ras_cmd; 1684 int ret; 1685 1686 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1687 1688 /* 1689 * TODO: bypass the loading in sriov for now 1690 */ 1691 if (amdgpu_sriov_vf(psp->adev)) 1692 return 0; 1693 1694 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context); 1695 1696 if (amdgpu_ras_intr_triggered()) 1697 return ret; 1698 1699 if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) { 1700 dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n"); 1701 return -EINVAL; 1702 } 1703 1704 if (!ret) { 1705 if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) { 1706 dev_warn(psp->adev->dev, "ECC switch disabled\n"); 1707 1708 ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE; 1709 } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag) 1710 dev_warn(psp->adev->dev, 1711 "RAS internal register access blocked\n"); 1712 1713 psp_ras_ta_check_status(psp); 1714 } 1715 1716 return ret; 1717 } 1718 1719 int psp_ras_enable_features(struct psp_context *psp, 1720 union ta_ras_cmd_input *info, bool enable) 1721 { 1722 enum ras_command cmd_id; 1723 int ret; 1724 1725 if (!psp->ras_context.context.initialized || !info) 1726 return -EINVAL; 1727 1728 cmd_id = enable ? 1729 TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES; 1730 ret = psp_ras_send_cmd(psp, cmd_id, info, NULL); 1731 if (ret) 1732 return -EINVAL; 1733 1734 return 0; 1735 } 1736 1737 int psp_ras_terminate(struct psp_context *psp) 1738 { 1739 int ret; 1740 1741 /* 1742 * TODO: bypass the terminate in sriov for now 1743 */ 1744 if (amdgpu_sriov_vf(psp->adev)) 1745 return 0; 1746 1747 if (!psp->ras_context.context.initialized) 1748 return 0; 1749 1750 ret = psp_ta_unload(psp, &psp->ras_context.context); 1751 1752 psp->ras_context.context.initialized = false; 1753 1754 mutex_destroy(&psp->ras_context.mutex); 1755 1756 return ret; 1757 } 1758 1759 int psp_ras_initialize(struct psp_context *psp) 1760 { 1761 int ret; 1762 uint32_t boot_cfg = 0xFF; 1763 struct amdgpu_device *adev = psp->adev; 1764 struct ta_ras_shared_memory *ras_cmd; 1765 1766 /* 1767 * TODO: bypass the initialize in sriov for now 1768 */ 1769 if (amdgpu_sriov_vf(adev)) 1770 return 0; 1771 1772 if (!adev->psp.ras_context.context.bin_desc.size_bytes || 1773 !adev->psp.ras_context.context.bin_desc.start_addr) { 1774 dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n"); 1775 return 0; 1776 } 1777 1778 if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) { 1779 /* query GECC enablement status from boot config 1780 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled 1781 */ 1782 ret = psp_boot_config_get(adev, &boot_cfg); 1783 if (ret) 1784 dev_warn(adev->dev, "PSP get boot config failed\n"); 1785 1786 if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) { 1787 if (!boot_cfg) { 1788 dev_info(adev->dev, "GECC is disabled\n"); 1789 } else { 1790 /* disable GECC in next boot cycle if ras is 1791 * disabled by module parameter amdgpu_ras_enable 1792 * and/or amdgpu_ras_mask, or boot_config_get call 1793 * is failed 1794 */ 1795 ret = psp_boot_config_set(adev, 0); 1796 if (ret) 1797 dev_warn(adev->dev, "PSP set boot config failed\n"); 1798 else 1799 dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n"); 1800 } 1801 } else { 1802 if (boot_cfg == 1) { 1803 dev_info(adev->dev, "GECC is enabled\n"); 1804 } else { 1805 /* enable GECC in next boot cycle if it is disabled 1806 * in boot config, or force enable GECC if failed to 1807 * get boot configuration 1808 */ 1809 ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC); 1810 if (ret) 1811 dev_warn(adev->dev, "PSP set boot config failed\n"); 1812 else 1813 dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n"); 1814 } 1815 } 1816 } 1817 1818 psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE; 1819 psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1820 1821 if (!psp->ras_context.context.mem_context.shared_buf) { 1822 ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context); 1823 if (ret) 1824 return ret; 1825 } 1826 1827 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1828 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1829 1830 if (amdgpu_ras_is_poison_mode_supported(adev)) 1831 ras_cmd->ras_in_message.init_flags.poison_mode_en = 1; 1832 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) 1833 ras_cmd->ras_in_message.init_flags.dgpu_mode = 1; 1834 ras_cmd->ras_in_message.init_flags.xcc_mask = 1835 adev->gfx.xcc_mask; 1836 ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2; 1837 if (adev->gmc.gmc_funcs->query_mem_partition_mode) 1838 ras_cmd->ras_in_message.init_flags.nps_mode = 1839 adev->gmc.gmc_funcs->query_mem_partition_mode(adev); 1840 1841 ret = psp_ta_load(psp, &psp->ras_context.context); 1842 1843 if (!ret && !ras_cmd->ras_status) { 1844 psp->ras_context.context.initialized = true; 1845 mutex_init(&psp->ras_context.mutex); 1846 } else { 1847 if (ras_cmd->ras_status) 1848 dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status); 1849 1850 /* fail to load RAS TA */ 1851 psp->ras_context.context.initialized = false; 1852 } 1853 1854 return ret; 1855 } 1856 1857 int psp_ras_trigger_error(struct psp_context *psp, 1858 struct ta_ras_trigger_error_input *info, uint32_t instance_mask) 1859 { 1860 struct amdgpu_device *adev = psp->adev; 1861 int ret; 1862 uint32_t dev_mask; 1863 uint32_t ras_status = 0; 1864 1865 if (!psp->ras_context.context.initialized || !info) 1866 return -EINVAL; 1867 1868 switch (info->block_id) { 1869 case TA_RAS_BLOCK__GFX: 1870 dev_mask = GET_MASK(GC, instance_mask); 1871 break; 1872 case TA_RAS_BLOCK__SDMA: 1873 dev_mask = GET_MASK(SDMA0, instance_mask); 1874 break; 1875 case TA_RAS_BLOCK__VCN: 1876 case TA_RAS_BLOCK__JPEG: 1877 dev_mask = GET_MASK(VCN, instance_mask); 1878 break; 1879 default: 1880 dev_mask = instance_mask; 1881 break; 1882 } 1883 1884 /* reuse sub_block_index for backward compatibility */ 1885 dev_mask <<= AMDGPU_RAS_INST_SHIFT; 1886 dev_mask &= AMDGPU_RAS_INST_MASK; 1887 info->sub_block_index |= dev_mask; 1888 1889 ret = psp_ras_send_cmd(psp, 1890 TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status); 1891 if (ret) 1892 return -EINVAL; 1893 1894 /* If err_event_athub occurs error inject was successful, however 1895 * return status from TA is no long reliable 1896 */ 1897 if (amdgpu_ras_intr_triggered()) 1898 return 0; 1899 1900 if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED) 1901 return -EACCES; 1902 else if (ras_status) 1903 return -EINVAL; 1904 1905 return 0; 1906 } 1907 1908 int psp_ras_query_address(struct psp_context *psp, 1909 struct ta_ras_query_address_input *addr_in, 1910 struct ta_ras_query_address_output *addr_out) 1911 { 1912 int ret; 1913 1914 if (!psp->ras_context.context.initialized || 1915 !addr_in || !addr_out) 1916 return -EINVAL; 1917 1918 ret = psp_ras_send_cmd(psp, 1919 TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out); 1920 1921 return ret; 1922 } 1923 // ras end 1924 1925 // HDCP start 1926 static int psp_hdcp_initialize(struct psp_context *psp) 1927 { 1928 int ret; 1929 1930 /* 1931 * TODO: bypass the initialize in sriov for now 1932 */ 1933 if (amdgpu_sriov_vf(psp->adev)) 1934 return 0; 1935 1936 /* bypass hdcp initialization if dmu is harvested */ 1937 if (!amdgpu_device_has_display_hardware(psp->adev)) 1938 return 0; 1939 1940 if (!psp->hdcp_context.context.bin_desc.size_bytes || 1941 !psp->hdcp_context.context.bin_desc.start_addr) { 1942 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n"); 1943 return 0; 1944 } 1945 1946 psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE; 1947 psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1948 1949 if (!psp->hdcp_context.context.mem_context.shared_buf) { 1950 ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context); 1951 if (ret) 1952 return ret; 1953 } 1954 1955 ret = psp_ta_load(psp, &psp->hdcp_context.context); 1956 if (!ret) { 1957 psp->hdcp_context.context.initialized = true; 1958 mutex_init(&psp->hdcp_context.mutex); 1959 } 1960 1961 return ret; 1962 } 1963 1964 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1965 { 1966 /* 1967 * TODO: bypass the loading in sriov for now 1968 */ 1969 if (amdgpu_sriov_vf(psp->adev)) 1970 return 0; 1971 1972 if (!psp->hdcp_context.context.initialized) 1973 return 0; 1974 1975 return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context); 1976 } 1977 1978 static int psp_hdcp_terminate(struct psp_context *psp) 1979 { 1980 int ret; 1981 1982 /* 1983 * TODO: bypass the terminate in sriov for now 1984 */ 1985 if (amdgpu_sriov_vf(psp->adev)) 1986 return 0; 1987 1988 if (!psp->hdcp_context.context.initialized) 1989 return 0; 1990 1991 ret = psp_ta_unload(psp, &psp->hdcp_context.context); 1992 1993 psp->hdcp_context.context.initialized = false; 1994 1995 return ret; 1996 } 1997 // HDCP end 1998 1999 // DTM start 2000 static int psp_dtm_initialize(struct psp_context *psp) 2001 { 2002 int ret; 2003 2004 /* 2005 * TODO: bypass the initialize in sriov for now 2006 */ 2007 if (amdgpu_sriov_vf(psp->adev)) 2008 return 0; 2009 2010 /* bypass dtm initialization if dmu is harvested */ 2011 if (!amdgpu_device_has_display_hardware(psp->adev)) 2012 return 0; 2013 2014 if (!psp->dtm_context.context.bin_desc.size_bytes || 2015 !psp->dtm_context.context.bin_desc.start_addr) { 2016 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n"); 2017 return 0; 2018 } 2019 2020 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE; 2021 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2022 2023 if (!psp->dtm_context.context.mem_context.shared_buf) { 2024 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context); 2025 if (ret) 2026 return ret; 2027 } 2028 2029 ret = psp_ta_load(psp, &psp->dtm_context.context); 2030 if (!ret) { 2031 psp->dtm_context.context.initialized = true; 2032 mutex_init(&psp->dtm_context.mutex); 2033 } 2034 2035 return ret; 2036 } 2037 2038 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2039 { 2040 /* 2041 * TODO: bypass the loading in sriov for now 2042 */ 2043 if (amdgpu_sriov_vf(psp->adev)) 2044 return 0; 2045 2046 if (!psp->dtm_context.context.initialized) 2047 return 0; 2048 2049 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context); 2050 } 2051 2052 static int psp_dtm_terminate(struct psp_context *psp) 2053 { 2054 int ret; 2055 2056 /* 2057 * TODO: bypass the terminate in sriov for now 2058 */ 2059 if (amdgpu_sriov_vf(psp->adev)) 2060 return 0; 2061 2062 if (!psp->dtm_context.context.initialized) 2063 return 0; 2064 2065 ret = psp_ta_unload(psp, &psp->dtm_context.context); 2066 2067 psp->dtm_context.context.initialized = false; 2068 2069 return ret; 2070 } 2071 // DTM end 2072 2073 // RAP start 2074 static int psp_rap_initialize(struct psp_context *psp) 2075 { 2076 int ret; 2077 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS; 2078 2079 /* 2080 * TODO: bypass the initialize in sriov for now 2081 */ 2082 if (amdgpu_sriov_vf(psp->adev)) 2083 return 0; 2084 2085 if (!psp->rap_context.context.bin_desc.size_bytes || 2086 !psp->rap_context.context.bin_desc.start_addr) { 2087 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n"); 2088 return 0; 2089 } 2090 2091 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE; 2092 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2093 2094 if (!psp->rap_context.context.mem_context.shared_buf) { 2095 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context); 2096 if (ret) 2097 return ret; 2098 } 2099 2100 ret = psp_ta_load(psp, &psp->rap_context.context); 2101 if (!ret) { 2102 psp->rap_context.context.initialized = true; 2103 mutex_init(&psp->rap_context.mutex); 2104 } else 2105 return ret; 2106 2107 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status); 2108 if (ret || status != TA_RAP_STATUS__SUCCESS) { 2109 psp_rap_terminate(psp); 2110 /* free rap shared memory */ 2111 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); 2112 2113 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n", 2114 ret, status); 2115 2116 return ret; 2117 } 2118 2119 return 0; 2120 } 2121 2122 static int psp_rap_terminate(struct psp_context *psp) 2123 { 2124 int ret; 2125 2126 if (!psp->rap_context.context.initialized) 2127 return 0; 2128 2129 ret = psp_ta_unload(psp, &psp->rap_context.context); 2130 2131 psp->rap_context.context.initialized = false; 2132 2133 return ret; 2134 } 2135 2136 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status) 2137 { 2138 struct ta_rap_shared_memory *rap_cmd; 2139 int ret = 0; 2140 2141 if (!psp->rap_context.context.initialized) 2142 return 0; 2143 2144 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE && 2145 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0) 2146 return -EINVAL; 2147 2148 mutex_lock(&psp->rap_context.mutex); 2149 2150 rap_cmd = (struct ta_rap_shared_memory *) 2151 psp->rap_context.context.mem_context.shared_buf; 2152 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory)); 2153 2154 rap_cmd->cmd_id = ta_cmd_id; 2155 rap_cmd->validation_method_id = METHOD_A; 2156 2157 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context); 2158 if (ret) 2159 goto out_unlock; 2160 2161 if (status) 2162 *status = rap_cmd->rap_status; 2163 2164 out_unlock: 2165 mutex_unlock(&psp->rap_context.mutex); 2166 2167 return ret; 2168 } 2169 // RAP end 2170 2171 /* securedisplay start */ 2172 static int psp_securedisplay_initialize(struct psp_context *psp) 2173 { 2174 int ret; 2175 struct ta_securedisplay_cmd *securedisplay_cmd; 2176 2177 /* 2178 * TODO: bypass the initialize in sriov for now 2179 */ 2180 if (amdgpu_sriov_vf(psp->adev)) 2181 return 0; 2182 2183 /* bypass securedisplay initialization if dmu is harvested */ 2184 if (!amdgpu_device_has_display_hardware(psp->adev)) 2185 return 0; 2186 2187 if (!psp->securedisplay_context.context.bin_desc.size_bytes || 2188 !psp->securedisplay_context.context.bin_desc.start_addr) { 2189 dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n"); 2190 return 0; 2191 } 2192 2193 psp->securedisplay_context.context.mem_context.shared_mem_size = 2194 PSP_SECUREDISPLAY_SHARED_MEM_SIZE; 2195 psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2196 2197 if (!psp->securedisplay_context.context.initialized) { 2198 ret = psp_ta_init_shared_buf(psp, 2199 &psp->securedisplay_context.context.mem_context); 2200 if (ret) 2201 return ret; 2202 } 2203 2204 ret = psp_ta_load(psp, &psp->securedisplay_context.context); 2205 if (!ret) { 2206 psp->securedisplay_context.context.initialized = true; 2207 mutex_init(&psp->securedisplay_context.mutex); 2208 } else 2209 return ret; 2210 2211 mutex_lock(&psp->securedisplay_context.mutex); 2212 2213 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, 2214 TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2215 2216 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2217 2218 mutex_unlock(&psp->securedisplay_context.mutex); 2219 2220 if (ret) { 2221 psp_securedisplay_terminate(psp); 2222 /* free securedisplay shared memory */ 2223 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); 2224 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n"); 2225 return -EINVAL; 2226 } 2227 2228 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { 2229 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); 2230 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n", 2231 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret); 2232 /* don't try again */ 2233 psp->securedisplay_context.context.bin_desc.size_bytes = 0; 2234 } 2235 2236 return 0; 2237 } 2238 2239 static int psp_securedisplay_terminate(struct psp_context *psp) 2240 { 2241 int ret; 2242 2243 /* 2244 * TODO:bypass the terminate in sriov for now 2245 */ 2246 if (amdgpu_sriov_vf(psp->adev)) 2247 return 0; 2248 2249 if (!psp->securedisplay_context.context.initialized) 2250 return 0; 2251 2252 ret = psp_ta_unload(psp, &psp->securedisplay_context.context); 2253 2254 psp->securedisplay_context.context.initialized = false; 2255 2256 return ret; 2257 } 2258 2259 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2260 { 2261 int ret; 2262 2263 if (!psp->securedisplay_context.context.initialized) 2264 return -EINVAL; 2265 2266 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA && 2267 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC && 2268 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2) 2269 return -EINVAL; 2270 2271 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context); 2272 2273 return ret; 2274 } 2275 /* SECUREDISPLAY end */ 2276 2277 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev) 2278 { 2279 struct psp_context *psp = &adev->psp; 2280 int ret = 0; 2281 2282 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL) 2283 ret = psp->funcs->wait_for_bootloader(psp); 2284 2285 return ret; 2286 } 2287 2288 bool amdgpu_psp_get_ras_capability(struct psp_context *psp) 2289 { 2290 if (psp->funcs && 2291 psp->funcs->get_ras_capability) { 2292 return psp->funcs->get_ras_capability(psp); 2293 } else { 2294 return false; 2295 } 2296 } 2297 2298 bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev) 2299 { 2300 struct psp_context *psp = &adev->psp; 2301 2302 if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) 2303 return false; 2304 2305 if (psp->funcs && psp->funcs->is_reload_needed) 2306 return psp->funcs->is_reload_needed(psp); 2307 2308 return false; 2309 } 2310 2311 static int psp_hw_start(struct psp_context *psp) 2312 { 2313 struct amdgpu_device *adev = psp->adev; 2314 int ret; 2315 2316 if (!amdgpu_sriov_vf(adev)) { 2317 if ((is_psp_fw_valid(psp->kdb)) && 2318 (psp->funcs->bootloader_load_kdb != NULL)) { 2319 ret = psp_bootloader_load_kdb(psp); 2320 if (ret) { 2321 dev_err(adev->dev, "PSP load kdb failed!\n"); 2322 return ret; 2323 } 2324 } 2325 2326 if ((is_psp_fw_valid(psp->spl)) && 2327 (psp->funcs->bootloader_load_spl != NULL)) { 2328 ret = psp_bootloader_load_spl(psp); 2329 if (ret) { 2330 dev_err(adev->dev, "PSP load spl failed!\n"); 2331 return ret; 2332 } 2333 } 2334 2335 if ((is_psp_fw_valid(psp->sys)) && 2336 (psp->funcs->bootloader_load_sysdrv != NULL)) { 2337 ret = psp_bootloader_load_sysdrv(psp); 2338 if (ret) { 2339 dev_err(adev->dev, "PSP load sys drv failed!\n"); 2340 return ret; 2341 } 2342 } 2343 2344 if ((is_psp_fw_valid(psp->soc_drv)) && 2345 (psp->funcs->bootloader_load_soc_drv != NULL)) { 2346 ret = psp_bootloader_load_soc_drv(psp); 2347 if (ret) { 2348 dev_err(adev->dev, "PSP load soc drv failed!\n"); 2349 return ret; 2350 } 2351 } 2352 2353 if ((is_psp_fw_valid(psp->intf_drv)) && 2354 (psp->funcs->bootloader_load_intf_drv != NULL)) { 2355 ret = psp_bootloader_load_intf_drv(psp); 2356 if (ret) { 2357 dev_err(adev->dev, "PSP load intf drv failed!\n"); 2358 return ret; 2359 } 2360 } 2361 2362 if ((is_psp_fw_valid(psp->dbg_drv)) && 2363 (psp->funcs->bootloader_load_dbg_drv != NULL)) { 2364 ret = psp_bootloader_load_dbg_drv(psp); 2365 if (ret) { 2366 dev_err(adev->dev, "PSP load dbg drv failed!\n"); 2367 return ret; 2368 } 2369 } 2370 2371 if ((is_psp_fw_valid(psp->ras_drv)) && 2372 (psp->funcs->bootloader_load_ras_drv != NULL)) { 2373 ret = psp_bootloader_load_ras_drv(psp); 2374 if (ret) { 2375 dev_err(adev->dev, "PSP load ras_drv failed!\n"); 2376 return ret; 2377 } 2378 } 2379 2380 if ((is_psp_fw_valid(psp->ipkeymgr_drv)) && 2381 (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) { 2382 ret = psp_bootloader_load_ipkeymgr_drv(psp); 2383 if (ret) { 2384 dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n"); 2385 return ret; 2386 } 2387 } 2388 2389 if ((is_psp_fw_valid(psp->sos)) && 2390 (psp->funcs->bootloader_load_sos != NULL)) { 2391 ret = psp_bootloader_load_sos(psp); 2392 if (ret) { 2393 dev_err(adev->dev, "PSP load sos failed!\n"); 2394 return ret; 2395 } 2396 } 2397 } 2398 2399 ret = psp_ring_create(psp, PSP_RING_TYPE__KM); 2400 if (ret) { 2401 dev_err(adev->dev, "PSP create ring failed!\n"); 2402 return ret; 2403 } 2404 2405 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) 2406 goto skip_pin_bo; 2407 2408 if (!psp->boot_time_tmr || psp->autoload_supported) { 2409 ret = psp_tmr_init(psp); 2410 if (ret) { 2411 dev_err(adev->dev, "PSP tmr init failed!\n"); 2412 return ret; 2413 } 2414 } 2415 2416 skip_pin_bo: 2417 /* 2418 * For ASICs with DF Cstate management centralized 2419 * to PMFW, TMR setup should be performed after PMFW 2420 * loaded and before other non-psp firmware loaded. 2421 */ 2422 if (psp->pmfw_centralized_cstate_management) { 2423 ret = psp_load_smu_fw(psp); 2424 if (ret) 2425 return ret; 2426 } 2427 2428 if (!psp->boot_time_tmr || !psp->autoload_supported) { 2429 ret = psp_tmr_load(psp); 2430 if (ret) { 2431 dev_err(adev->dev, "PSP load tmr failed!\n"); 2432 return ret; 2433 } 2434 } 2435 2436 return 0; 2437 } 2438 2439 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, 2440 enum psp_gfx_fw_type *type) 2441 { 2442 switch (ucode->ucode_id) { 2443 case AMDGPU_UCODE_ID_CAP: 2444 *type = GFX_FW_TYPE_CAP; 2445 break; 2446 case AMDGPU_UCODE_ID_SDMA0: 2447 *type = GFX_FW_TYPE_SDMA0; 2448 break; 2449 case AMDGPU_UCODE_ID_SDMA1: 2450 *type = GFX_FW_TYPE_SDMA1; 2451 break; 2452 case AMDGPU_UCODE_ID_SDMA2: 2453 *type = GFX_FW_TYPE_SDMA2; 2454 break; 2455 case AMDGPU_UCODE_ID_SDMA3: 2456 *type = GFX_FW_TYPE_SDMA3; 2457 break; 2458 case AMDGPU_UCODE_ID_SDMA4: 2459 *type = GFX_FW_TYPE_SDMA4; 2460 break; 2461 case AMDGPU_UCODE_ID_SDMA5: 2462 *type = GFX_FW_TYPE_SDMA5; 2463 break; 2464 case AMDGPU_UCODE_ID_SDMA6: 2465 *type = GFX_FW_TYPE_SDMA6; 2466 break; 2467 case AMDGPU_UCODE_ID_SDMA7: 2468 *type = GFX_FW_TYPE_SDMA7; 2469 break; 2470 case AMDGPU_UCODE_ID_CP_MES: 2471 *type = GFX_FW_TYPE_CP_MES; 2472 break; 2473 case AMDGPU_UCODE_ID_CP_MES_DATA: 2474 *type = GFX_FW_TYPE_MES_STACK; 2475 break; 2476 case AMDGPU_UCODE_ID_CP_MES1: 2477 *type = GFX_FW_TYPE_CP_MES_KIQ; 2478 break; 2479 case AMDGPU_UCODE_ID_CP_MES1_DATA: 2480 *type = GFX_FW_TYPE_MES_KIQ_STACK; 2481 break; 2482 case AMDGPU_UCODE_ID_CP_CE: 2483 *type = GFX_FW_TYPE_CP_CE; 2484 break; 2485 case AMDGPU_UCODE_ID_CP_PFP: 2486 *type = GFX_FW_TYPE_CP_PFP; 2487 break; 2488 case AMDGPU_UCODE_ID_CP_ME: 2489 *type = GFX_FW_TYPE_CP_ME; 2490 break; 2491 case AMDGPU_UCODE_ID_CP_MEC1: 2492 *type = GFX_FW_TYPE_CP_MEC; 2493 break; 2494 case AMDGPU_UCODE_ID_CP_MEC1_JT: 2495 *type = GFX_FW_TYPE_CP_MEC_ME1; 2496 break; 2497 case AMDGPU_UCODE_ID_CP_MEC2: 2498 *type = GFX_FW_TYPE_CP_MEC; 2499 break; 2500 case AMDGPU_UCODE_ID_CP_MEC2_JT: 2501 *type = GFX_FW_TYPE_CP_MEC_ME2; 2502 break; 2503 case AMDGPU_UCODE_ID_RLC_P: 2504 *type = GFX_FW_TYPE_RLC_P; 2505 break; 2506 case AMDGPU_UCODE_ID_RLC_V: 2507 *type = GFX_FW_TYPE_RLC_V; 2508 break; 2509 case AMDGPU_UCODE_ID_RLC_G: 2510 *type = GFX_FW_TYPE_RLC_G; 2511 break; 2512 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: 2513 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL; 2514 break; 2515 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: 2516 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM; 2517 break; 2518 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: 2519 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; 2520 break; 2521 case AMDGPU_UCODE_ID_RLC_IRAM: 2522 *type = GFX_FW_TYPE_RLC_IRAM; 2523 break; 2524 case AMDGPU_UCODE_ID_RLC_DRAM: 2525 *type = GFX_FW_TYPE_RLC_DRAM_BOOT; 2526 break; 2527 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS: 2528 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS; 2529 break; 2530 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS: 2531 *type = GFX_FW_TYPE_SE0_TAP_DELAYS; 2532 break; 2533 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS: 2534 *type = GFX_FW_TYPE_SE1_TAP_DELAYS; 2535 break; 2536 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS: 2537 *type = GFX_FW_TYPE_SE2_TAP_DELAYS; 2538 break; 2539 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS: 2540 *type = GFX_FW_TYPE_SE3_TAP_DELAYS; 2541 break; 2542 case AMDGPU_UCODE_ID_SMC: 2543 *type = GFX_FW_TYPE_SMU; 2544 break; 2545 case AMDGPU_UCODE_ID_PPTABLE: 2546 *type = GFX_FW_TYPE_PPTABLE; 2547 break; 2548 case AMDGPU_UCODE_ID_UVD: 2549 *type = GFX_FW_TYPE_UVD; 2550 break; 2551 case AMDGPU_UCODE_ID_UVD1: 2552 *type = GFX_FW_TYPE_UVD1; 2553 break; 2554 case AMDGPU_UCODE_ID_VCE: 2555 *type = GFX_FW_TYPE_VCE; 2556 break; 2557 case AMDGPU_UCODE_ID_VCN: 2558 *type = GFX_FW_TYPE_VCN; 2559 break; 2560 case AMDGPU_UCODE_ID_VCN1: 2561 *type = GFX_FW_TYPE_VCN1; 2562 break; 2563 case AMDGPU_UCODE_ID_DMCU_ERAM: 2564 *type = GFX_FW_TYPE_DMCU_ERAM; 2565 break; 2566 case AMDGPU_UCODE_ID_DMCU_INTV: 2567 *type = GFX_FW_TYPE_DMCU_ISR; 2568 break; 2569 case AMDGPU_UCODE_ID_VCN0_RAM: 2570 *type = GFX_FW_TYPE_VCN0_RAM; 2571 break; 2572 case AMDGPU_UCODE_ID_VCN1_RAM: 2573 *type = GFX_FW_TYPE_VCN1_RAM; 2574 break; 2575 case AMDGPU_UCODE_ID_DMCUB: 2576 *type = GFX_FW_TYPE_DMUB; 2577 break; 2578 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0: 2579 case AMDGPU_UCODE_ID_SDMA_RS64: 2580 *type = GFX_FW_TYPE_SDMA_UCODE_TH0; 2581 break; 2582 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1: 2583 *type = GFX_FW_TYPE_SDMA_UCODE_TH1; 2584 break; 2585 case AMDGPU_UCODE_ID_IMU_I: 2586 *type = GFX_FW_TYPE_IMU_I; 2587 break; 2588 case AMDGPU_UCODE_ID_IMU_D: 2589 *type = GFX_FW_TYPE_IMU_D; 2590 break; 2591 case AMDGPU_UCODE_ID_CP_RS64_PFP: 2592 *type = GFX_FW_TYPE_RS64_PFP; 2593 break; 2594 case AMDGPU_UCODE_ID_CP_RS64_ME: 2595 *type = GFX_FW_TYPE_RS64_ME; 2596 break; 2597 case AMDGPU_UCODE_ID_CP_RS64_MEC: 2598 *type = GFX_FW_TYPE_RS64_MEC; 2599 break; 2600 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK: 2601 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK; 2602 break; 2603 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK: 2604 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK; 2605 break; 2606 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK: 2607 *type = GFX_FW_TYPE_RS64_ME_P0_STACK; 2608 break; 2609 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK: 2610 *type = GFX_FW_TYPE_RS64_ME_P1_STACK; 2611 break; 2612 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK: 2613 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK; 2614 break; 2615 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK: 2616 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK; 2617 break; 2618 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK: 2619 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK; 2620 break; 2621 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK: 2622 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK; 2623 break; 2624 case AMDGPU_UCODE_ID_VPE_CTX: 2625 *type = GFX_FW_TYPE_VPEC_FW1; 2626 break; 2627 case AMDGPU_UCODE_ID_VPE_CTL: 2628 *type = GFX_FW_TYPE_VPEC_FW2; 2629 break; 2630 case AMDGPU_UCODE_ID_VPE: 2631 *type = GFX_FW_TYPE_VPE; 2632 break; 2633 case AMDGPU_UCODE_ID_UMSCH_MM_UCODE: 2634 *type = GFX_FW_TYPE_UMSCH_UCODE; 2635 break; 2636 case AMDGPU_UCODE_ID_UMSCH_MM_DATA: 2637 *type = GFX_FW_TYPE_UMSCH_DATA; 2638 break; 2639 case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER: 2640 *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER; 2641 break; 2642 case AMDGPU_UCODE_ID_P2S_TABLE: 2643 *type = GFX_FW_TYPE_P2S_TABLE; 2644 break; 2645 case AMDGPU_UCODE_ID_JPEG_RAM: 2646 *type = GFX_FW_TYPE_JPEG_RAM; 2647 break; 2648 case AMDGPU_UCODE_ID_ISP: 2649 *type = GFX_FW_TYPE_ISP; 2650 break; 2651 case AMDGPU_UCODE_ID_MAXIMUM: 2652 default: 2653 return -EINVAL; 2654 } 2655 2656 return 0; 2657 } 2658 2659 static void psp_print_fw_hdr(struct psp_context *psp, 2660 struct amdgpu_firmware_info *ucode) 2661 { 2662 struct amdgpu_device *adev = psp->adev; 2663 struct common_firmware_header *hdr; 2664 2665 switch (ucode->ucode_id) { 2666 case AMDGPU_UCODE_ID_SDMA0: 2667 case AMDGPU_UCODE_ID_SDMA1: 2668 case AMDGPU_UCODE_ID_SDMA2: 2669 case AMDGPU_UCODE_ID_SDMA3: 2670 case AMDGPU_UCODE_ID_SDMA4: 2671 case AMDGPU_UCODE_ID_SDMA5: 2672 case AMDGPU_UCODE_ID_SDMA6: 2673 case AMDGPU_UCODE_ID_SDMA7: 2674 hdr = (struct common_firmware_header *) 2675 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; 2676 amdgpu_ucode_print_sdma_hdr(hdr); 2677 break; 2678 case AMDGPU_UCODE_ID_CP_CE: 2679 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data; 2680 amdgpu_ucode_print_gfx_hdr(hdr); 2681 break; 2682 case AMDGPU_UCODE_ID_CP_PFP: 2683 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data; 2684 amdgpu_ucode_print_gfx_hdr(hdr); 2685 break; 2686 case AMDGPU_UCODE_ID_CP_ME: 2687 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data; 2688 amdgpu_ucode_print_gfx_hdr(hdr); 2689 break; 2690 case AMDGPU_UCODE_ID_CP_MEC1: 2691 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data; 2692 amdgpu_ucode_print_gfx_hdr(hdr); 2693 break; 2694 case AMDGPU_UCODE_ID_RLC_G: 2695 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data; 2696 amdgpu_ucode_print_rlc_hdr(hdr); 2697 break; 2698 case AMDGPU_UCODE_ID_SMC: 2699 hdr = (struct common_firmware_header *)adev->pm.fw->data; 2700 amdgpu_ucode_print_smc_hdr(hdr); 2701 break; 2702 default: 2703 break; 2704 } 2705 } 2706 2707 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp, 2708 struct amdgpu_firmware_info *ucode, 2709 struct psp_gfx_cmd_resp *cmd) 2710 { 2711 int ret; 2712 uint64_t fw_mem_mc_addr = ucode->mc_addr; 2713 2714 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 2715 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); 2716 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); 2717 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; 2718 2719 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); 2720 if (ret) 2721 dev_err(psp->adev->dev, "Unknown firmware type\n"); 2722 2723 return ret; 2724 } 2725 2726 int psp_execute_ip_fw_load(struct psp_context *psp, 2727 struct amdgpu_firmware_info *ucode) 2728 { 2729 int ret = 0; 2730 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 2731 2732 ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd); 2733 if (!ret) { 2734 ret = psp_cmd_submit_buf(psp, ucode, cmd, 2735 psp->fence_buf_mc_addr); 2736 } 2737 2738 release_psp_cmd_buf(psp); 2739 2740 return ret; 2741 } 2742 2743 static int psp_load_p2s_table(struct psp_context *psp) 2744 { 2745 int ret; 2746 struct amdgpu_device *adev = psp->adev; 2747 struct amdgpu_firmware_info *ucode = 2748 &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE]; 2749 2750 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || 2751 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO))) 2752 return 0; 2753 2754 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || 2755 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) { 2756 uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D : 2757 0x0036003C; 2758 if (psp->sos.fw_version < supp_vers) 2759 return 0; 2760 } 2761 2762 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2763 return 0; 2764 2765 ret = psp_execute_ip_fw_load(psp, ucode); 2766 2767 return ret; 2768 } 2769 2770 static int psp_load_smu_fw(struct psp_context *psp) 2771 { 2772 int ret; 2773 struct amdgpu_device *adev = psp->adev; 2774 struct amdgpu_firmware_info *ucode = 2775 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 2776 struct amdgpu_ras *ras = psp->ras_context.ras; 2777 2778 /* 2779 * Skip SMU FW reloading in case of using BACO for runpm only, 2780 * as SMU is always alive. 2781 */ 2782 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || 2783 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO))) 2784 return 0; 2785 2786 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2787 return 0; 2788 2789 if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled && 2790 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) || 2791 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) { 2792 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); 2793 if (ret) 2794 dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n"); 2795 } 2796 2797 ret = psp_execute_ip_fw_load(psp, ucode); 2798 2799 if (ret) 2800 dev_err(adev->dev, "PSP load smu failed!\n"); 2801 2802 return ret; 2803 } 2804 2805 static bool fw_load_skip_check(struct psp_context *psp, 2806 struct amdgpu_firmware_info *ucode) 2807 { 2808 if (!ucode->fw || !ucode->ucode_size) 2809 return true; 2810 2811 if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE) 2812 return true; 2813 2814 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 2815 (psp_smu_reload_quirk(psp) || 2816 psp->autoload_supported || 2817 psp->pmfw_centralized_cstate_management)) 2818 return true; 2819 2820 if (amdgpu_sriov_vf(psp->adev) && 2821 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id)) 2822 return true; 2823 2824 if (psp->autoload_supported && 2825 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || 2826 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) 2827 /* skip mec JT when autoload is enabled */ 2828 return true; 2829 2830 return false; 2831 } 2832 2833 int psp_load_fw_list(struct psp_context *psp, 2834 struct amdgpu_firmware_info **ucode_list, int ucode_count) 2835 { 2836 int ret = 0, i; 2837 struct amdgpu_firmware_info *ucode; 2838 2839 for (i = 0; i < ucode_count; ++i) { 2840 ucode = ucode_list[i]; 2841 psp_print_fw_hdr(psp, ucode); 2842 ret = psp_execute_ip_fw_load(psp, ucode); 2843 if (ret) 2844 return ret; 2845 } 2846 return ret; 2847 } 2848 2849 static int psp_load_non_psp_fw(struct psp_context *psp) 2850 { 2851 int i, ret; 2852 struct amdgpu_firmware_info *ucode; 2853 struct amdgpu_device *adev = psp->adev; 2854 2855 if (psp->autoload_supported && 2856 !psp->pmfw_centralized_cstate_management) { 2857 ret = psp_load_smu_fw(psp); 2858 if (ret) 2859 return ret; 2860 } 2861 2862 /* Load P2S table first if it's available */ 2863 psp_load_p2s_table(psp); 2864 2865 for (i = 0; i < adev->firmware.max_ucodes; i++) { 2866 ucode = &adev->firmware.ucode[i]; 2867 2868 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 2869 !fw_load_skip_check(psp, ucode)) { 2870 ret = psp_load_smu_fw(psp); 2871 if (ret) 2872 return ret; 2873 continue; 2874 } 2875 2876 if (fw_load_skip_check(psp, ucode)) 2877 continue; 2878 2879 if (psp->autoload_supported && 2880 (amdgpu_ip_version(adev, MP0_HWIP, 0) == 2881 IP_VERSION(11, 0, 7) || 2882 amdgpu_ip_version(adev, MP0_HWIP, 0) == 2883 IP_VERSION(11, 0, 11) || 2884 amdgpu_ip_version(adev, MP0_HWIP, 0) == 2885 IP_VERSION(11, 0, 12)) && 2886 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 || 2887 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 || 2888 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3)) 2889 /* PSP only receive one SDMA fw for sienna_cichlid, 2890 * as all four sdma fw are same 2891 */ 2892 continue; 2893 2894 psp_print_fw_hdr(psp, ucode); 2895 2896 ret = psp_execute_ip_fw_load(psp, ucode); 2897 if (ret) 2898 return ret; 2899 2900 /* Start rlc autoload after psp received all the gfx firmware */ 2901 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ? 2902 adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) { 2903 ret = psp_rlc_autoload_start(psp); 2904 if (ret) { 2905 dev_err(adev->dev, "Failed to start rlc autoload\n"); 2906 return ret; 2907 } 2908 } 2909 } 2910 2911 return 0; 2912 } 2913 2914 static int psp_load_fw(struct amdgpu_device *adev) 2915 { 2916 int ret; 2917 struct psp_context *psp = &adev->psp; 2918 2919 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { 2920 /* should not destroy ring, only stop */ 2921 psp_ring_stop(psp, PSP_RING_TYPE__KM); 2922 } else { 2923 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE); 2924 2925 ret = psp_ring_init(psp, PSP_RING_TYPE__KM); 2926 if (ret) { 2927 dev_err(adev->dev, "PSP ring init failed!\n"); 2928 goto failed; 2929 } 2930 } 2931 2932 ret = psp_hw_start(psp); 2933 if (ret) 2934 goto failed; 2935 2936 ret = psp_load_non_psp_fw(psp); 2937 if (ret) 2938 goto failed1; 2939 2940 ret = psp_asd_initialize(psp); 2941 if (ret) { 2942 dev_err(adev->dev, "PSP load asd failed!\n"); 2943 goto failed1; 2944 } 2945 2946 ret = psp_rl_load(adev); 2947 if (ret) { 2948 dev_err(adev->dev, "PSP load RL failed!\n"); 2949 goto failed1; 2950 } 2951 2952 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { 2953 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2954 ret = psp_xgmi_initialize(psp, false, true); 2955 /* Warning the XGMI seesion initialize failure 2956 * Instead of stop driver initialization 2957 */ 2958 if (ret) 2959 dev_err(psp->adev->dev, 2960 "XGMI: Failed to initialize XGMI session\n"); 2961 } 2962 } 2963 2964 if (psp->ta_fw) { 2965 ret = psp_ras_initialize(psp); 2966 if (ret) 2967 dev_err(psp->adev->dev, 2968 "RAS: Failed to initialize RAS\n"); 2969 2970 ret = psp_hdcp_initialize(psp); 2971 if (ret) 2972 dev_err(psp->adev->dev, 2973 "HDCP: Failed to initialize HDCP\n"); 2974 2975 ret = psp_dtm_initialize(psp); 2976 if (ret) 2977 dev_err(psp->adev->dev, 2978 "DTM: Failed to initialize DTM\n"); 2979 2980 ret = psp_rap_initialize(psp); 2981 if (ret) 2982 dev_err(psp->adev->dev, 2983 "RAP: Failed to initialize RAP\n"); 2984 2985 ret = psp_securedisplay_initialize(psp); 2986 if (ret) 2987 dev_err(psp->adev->dev, 2988 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 2989 } 2990 2991 return 0; 2992 2993 failed1: 2994 psp_free_shared_bufs(psp); 2995 failed: 2996 /* 2997 * all cleanup jobs (xgmi terminate, ras terminate, 2998 * ring destroy, cmd/fence/fw buffers destory, 2999 * psp->cmd destory) are delayed to psp_hw_fini 3000 */ 3001 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 3002 return ret; 3003 } 3004 3005 static int psp_hw_init(struct amdgpu_ip_block *ip_block) 3006 { 3007 int ret; 3008 struct amdgpu_device *adev = ip_block->adev; 3009 3010 mutex_lock(&adev->firmware.mutex); 3011 /* 3012 * This sequence is just used on hw_init only once, no need on 3013 * resume. 3014 */ 3015 ret = amdgpu_ucode_init_bo(adev); 3016 if (ret) 3017 goto failed; 3018 3019 ret = psp_load_fw(adev); 3020 if (ret) { 3021 dev_err(adev->dev, "PSP firmware loading failed\n"); 3022 goto failed; 3023 } 3024 3025 mutex_unlock(&adev->firmware.mutex); 3026 return 0; 3027 3028 failed: 3029 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT; 3030 mutex_unlock(&adev->firmware.mutex); 3031 return -EINVAL; 3032 } 3033 3034 static int psp_hw_fini(struct amdgpu_ip_block *ip_block) 3035 { 3036 struct amdgpu_device *adev = ip_block->adev; 3037 struct psp_context *psp = &adev->psp; 3038 3039 if (psp->ta_fw) { 3040 psp_ras_terminate(psp); 3041 psp_securedisplay_terminate(psp); 3042 psp_rap_terminate(psp); 3043 psp_dtm_terminate(psp); 3044 psp_hdcp_terminate(psp); 3045 3046 if (adev->gmc.xgmi.num_physical_nodes > 1) 3047 psp_xgmi_terminate(psp); 3048 } 3049 3050 psp_asd_terminate(psp); 3051 psp_tmr_terminate(psp); 3052 3053 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 3054 3055 return 0; 3056 } 3057 3058 static int psp_suspend(struct amdgpu_ip_block *ip_block) 3059 { 3060 int ret = 0; 3061 struct amdgpu_device *adev = ip_block->adev; 3062 struct psp_context *psp = &adev->psp; 3063 3064 if (adev->gmc.xgmi.num_physical_nodes > 1 && 3065 psp->xgmi_context.context.initialized) { 3066 ret = psp_xgmi_terminate(psp); 3067 if (ret) { 3068 dev_err(adev->dev, "Failed to terminate xgmi ta\n"); 3069 goto out; 3070 } 3071 } 3072 3073 if (psp->ta_fw) { 3074 ret = psp_ras_terminate(psp); 3075 if (ret) { 3076 dev_err(adev->dev, "Failed to terminate ras ta\n"); 3077 goto out; 3078 } 3079 ret = psp_hdcp_terminate(psp); 3080 if (ret) { 3081 dev_err(adev->dev, "Failed to terminate hdcp ta\n"); 3082 goto out; 3083 } 3084 ret = psp_dtm_terminate(psp); 3085 if (ret) { 3086 dev_err(adev->dev, "Failed to terminate dtm ta\n"); 3087 goto out; 3088 } 3089 ret = psp_rap_terminate(psp); 3090 if (ret) { 3091 dev_err(adev->dev, "Failed to terminate rap ta\n"); 3092 goto out; 3093 } 3094 ret = psp_securedisplay_terminate(psp); 3095 if (ret) { 3096 dev_err(adev->dev, "Failed to terminate securedisplay ta\n"); 3097 goto out; 3098 } 3099 } 3100 3101 ret = psp_asd_terminate(psp); 3102 if (ret) { 3103 dev_err(adev->dev, "Failed to terminate asd\n"); 3104 goto out; 3105 } 3106 3107 ret = psp_tmr_terminate(psp); 3108 if (ret) { 3109 dev_err(adev->dev, "Failed to terminate tmr\n"); 3110 goto out; 3111 } 3112 3113 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); 3114 if (ret) 3115 dev_err(adev->dev, "PSP ring stop failed\n"); 3116 3117 out: 3118 return ret; 3119 } 3120 3121 static int psp_resume(struct amdgpu_ip_block *ip_block) 3122 { 3123 int ret; 3124 struct amdgpu_device *adev = ip_block->adev; 3125 struct psp_context *psp = &adev->psp; 3126 3127 dev_info(adev->dev, "PSP is resuming...\n"); 3128 3129 if (psp->mem_train_ctx.enable_mem_training) { 3130 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); 3131 if (ret) { 3132 dev_err(adev->dev, "Failed to process memory training!\n"); 3133 return ret; 3134 } 3135 } 3136 3137 mutex_lock(&adev->firmware.mutex); 3138 3139 ret = psp_hw_start(psp); 3140 if (ret) 3141 goto failed; 3142 3143 ret = psp_load_non_psp_fw(psp); 3144 if (ret) 3145 goto failed; 3146 3147 ret = psp_asd_initialize(psp); 3148 if (ret) { 3149 dev_err(adev->dev, "PSP load asd failed!\n"); 3150 goto failed; 3151 } 3152 3153 ret = psp_rl_load(adev); 3154 if (ret) { 3155 dev_err(adev->dev, "PSP load RL failed!\n"); 3156 goto failed; 3157 } 3158 3159 if (adev->gmc.xgmi.num_physical_nodes > 1) { 3160 ret = psp_xgmi_initialize(psp, false, true); 3161 /* Warning the XGMI seesion initialize failure 3162 * Instead of stop driver initialization 3163 */ 3164 if (ret) 3165 dev_err(psp->adev->dev, 3166 "XGMI: Failed to initialize XGMI session\n"); 3167 } 3168 3169 if (psp->ta_fw) { 3170 ret = psp_ras_initialize(psp); 3171 if (ret) 3172 dev_err(psp->adev->dev, 3173 "RAS: Failed to initialize RAS\n"); 3174 3175 ret = psp_hdcp_initialize(psp); 3176 if (ret) 3177 dev_err(psp->adev->dev, 3178 "HDCP: Failed to initialize HDCP\n"); 3179 3180 ret = psp_dtm_initialize(psp); 3181 if (ret) 3182 dev_err(psp->adev->dev, 3183 "DTM: Failed to initialize DTM\n"); 3184 3185 ret = psp_rap_initialize(psp); 3186 if (ret) 3187 dev_err(psp->adev->dev, 3188 "RAP: Failed to initialize RAP\n"); 3189 3190 ret = psp_securedisplay_initialize(psp); 3191 if (ret) 3192 dev_err(psp->adev->dev, 3193 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 3194 } 3195 3196 mutex_unlock(&adev->firmware.mutex); 3197 3198 return 0; 3199 3200 failed: 3201 dev_err(adev->dev, "PSP resume failed\n"); 3202 mutex_unlock(&adev->firmware.mutex); 3203 return ret; 3204 } 3205 3206 int psp_gpu_reset(struct amdgpu_device *adev) 3207 { 3208 int ret; 3209 3210 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 3211 return 0; 3212 3213 mutex_lock(&adev->psp.mutex); 3214 ret = psp_mode1_reset(&adev->psp); 3215 mutex_unlock(&adev->psp.mutex); 3216 3217 return ret; 3218 } 3219 3220 int psp_rlc_autoload_start(struct psp_context *psp) 3221 { 3222 int ret; 3223 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 3224 3225 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC; 3226 3227 ret = psp_cmd_submit_buf(psp, NULL, cmd, 3228 psp->fence_buf_mc_addr); 3229 3230 release_psp_cmd_buf(psp); 3231 3232 return ret; 3233 } 3234 3235 int psp_ring_cmd_submit(struct psp_context *psp, 3236 uint64_t cmd_buf_mc_addr, 3237 uint64_t fence_mc_addr, 3238 int index) 3239 { 3240 unsigned int psp_write_ptr_reg = 0; 3241 struct psp_gfx_rb_frame *write_frame; 3242 struct psp_ring *ring = &psp->km_ring; 3243 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; 3244 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + 3245 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; 3246 struct amdgpu_device *adev = psp->adev; 3247 uint32_t ring_size_dw = ring->ring_size / 4; 3248 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; 3249 3250 /* KM (GPCOM) prepare write pointer */ 3251 psp_write_ptr_reg = psp_ring_get_wptr(psp); 3252 3253 /* Update KM RB frame pointer to new frame */ 3254 /* write_frame ptr increments by size of rb_frame in bytes */ 3255 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ 3256 if ((psp_write_ptr_reg % ring_size_dw) == 0) 3257 write_frame = ring_buffer_start; 3258 else 3259 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); 3260 /* Check invalid write_frame ptr address */ 3261 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { 3262 dev_err(adev->dev, 3263 "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", 3264 ring_buffer_start, ring_buffer_end, write_frame); 3265 dev_err(adev->dev, 3266 "write_frame is pointing to address out of bounds\n"); 3267 return -EINVAL; 3268 } 3269 3270 /* Initialize KM RB frame */ 3271 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); 3272 3273 /* Update KM RB frame */ 3274 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); 3275 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); 3276 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); 3277 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); 3278 write_frame->fence_value = index; 3279 amdgpu_device_flush_hdp(adev, NULL); 3280 3281 /* Update the write Pointer in DWORDs */ 3282 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; 3283 psp_ring_set_wptr(psp, psp_write_ptr_reg); 3284 return 0; 3285 } 3286 3287 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name) 3288 { 3289 struct amdgpu_device *adev = psp->adev; 3290 const struct psp_firmware_header_v1_0 *asd_hdr; 3291 int err = 0; 3292 3293 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, "amdgpu/%s_asd.bin", chip_name); 3294 if (err) 3295 goto out; 3296 3297 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; 3298 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version); 3299 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version); 3300 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes); 3301 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr + 3302 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); 3303 return 0; 3304 out: 3305 amdgpu_ucode_release(&adev->psp.asd_fw); 3306 return err; 3307 } 3308 3309 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name) 3310 { 3311 struct amdgpu_device *adev = psp->adev; 3312 const struct psp_firmware_header_v1_0 *toc_hdr; 3313 int err = 0; 3314 3315 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, "amdgpu/%s_toc.bin", chip_name); 3316 if (err) 3317 goto out; 3318 3319 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; 3320 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); 3321 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); 3322 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); 3323 adev->psp.toc.start_addr = (uint8_t *)toc_hdr + 3324 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); 3325 return 0; 3326 out: 3327 amdgpu_ucode_release(&adev->psp.toc_fw); 3328 return err; 3329 } 3330 3331 static int parse_sos_bin_descriptor(struct psp_context *psp, 3332 const struct psp_fw_bin_desc *desc, 3333 const struct psp_firmware_header_v2_0 *sos_hdr) 3334 { 3335 uint8_t *ucode_start_addr = NULL; 3336 3337 if (!psp || !desc || !sos_hdr) 3338 return -EINVAL; 3339 3340 ucode_start_addr = (uint8_t *)sos_hdr + 3341 le32_to_cpu(desc->offset_bytes) + 3342 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3343 3344 switch (desc->fw_type) { 3345 case PSP_FW_TYPE_PSP_SOS: 3346 psp->sos.fw_version = le32_to_cpu(desc->fw_version); 3347 psp->sos.feature_version = le32_to_cpu(desc->fw_version); 3348 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes); 3349 psp->sos.start_addr = ucode_start_addr; 3350 break; 3351 case PSP_FW_TYPE_PSP_SYS_DRV: 3352 psp->sys.fw_version = le32_to_cpu(desc->fw_version); 3353 psp->sys.feature_version = le32_to_cpu(desc->fw_version); 3354 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes); 3355 psp->sys.start_addr = ucode_start_addr; 3356 break; 3357 case PSP_FW_TYPE_PSP_KDB: 3358 psp->kdb.fw_version = le32_to_cpu(desc->fw_version); 3359 psp->kdb.feature_version = le32_to_cpu(desc->fw_version); 3360 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes); 3361 psp->kdb.start_addr = ucode_start_addr; 3362 break; 3363 case PSP_FW_TYPE_PSP_TOC: 3364 psp->toc.fw_version = le32_to_cpu(desc->fw_version); 3365 psp->toc.feature_version = le32_to_cpu(desc->fw_version); 3366 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes); 3367 psp->toc.start_addr = ucode_start_addr; 3368 break; 3369 case PSP_FW_TYPE_PSP_SPL: 3370 psp->spl.fw_version = le32_to_cpu(desc->fw_version); 3371 psp->spl.feature_version = le32_to_cpu(desc->fw_version); 3372 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes); 3373 psp->spl.start_addr = ucode_start_addr; 3374 break; 3375 case PSP_FW_TYPE_PSP_RL: 3376 psp->rl.fw_version = le32_to_cpu(desc->fw_version); 3377 psp->rl.feature_version = le32_to_cpu(desc->fw_version); 3378 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes); 3379 psp->rl.start_addr = ucode_start_addr; 3380 break; 3381 case PSP_FW_TYPE_PSP_SOC_DRV: 3382 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version); 3383 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version); 3384 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3385 psp->soc_drv.start_addr = ucode_start_addr; 3386 break; 3387 case PSP_FW_TYPE_PSP_INTF_DRV: 3388 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version); 3389 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version); 3390 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3391 psp->intf_drv.start_addr = ucode_start_addr; 3392 break; 3393 case PSP_FW_TYPE_PSP_DBG_DRV: 3394 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version); 3395 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version); 3396 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3397 psp->dbg_drv.start_addr = ucode_start_addr; 3398 break; 3399 case PSP_FW_TYPE_PSP_RAS_DRV: 3400 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version); 3401 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version); 3402 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3403 psp->ras_drv.start_addr = ucode_start_addr; 3404 break; 3405 case PSP_FW_TYPE_PSP_IPKEYMGR_DRV: 3406 psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version); 3407 psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version); 3408 psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3409 psp->ipkeymgr_drv.start_addr = ucode_start_addr; 3410 break; 3411 default: 3412 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type); 3413 break; 3414 } 3415 3416 return 0; 3417 } 3418 3419 static int psp_init_sos_base_fw(struct amdgpu_device *adev) 3420 { 3421 const struct psp_firmware_header_v1_0 *sos_hdr; 3422 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3423 uint8_t *ucode_array_start_addr; 3424 3425 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3426 ucode_array_start_addr = (uint8_t *)sos_hdr + 3427 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3428 3429 if (adev->gmc.xgmi.connected_to_cpu || 3430 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) { 3431 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version); 3432 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version); 3433 3434 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes); 3435 adev->psp.sys.start_addr = ucode_array_start_addr; 3436 3437 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes); 3438 adev->psp.sos.start_addr = ucode_array_start_addr + 3439 le32_to_cpu(sos_hdr->sos.offset_bytes); 3440 } else { 3441 /* Load alternate PSP SOS FW */ 3442 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3443 3444 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3445 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3446 3447 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes); 3448 adev->psp.sys.start_addr = ucode_array_start_addr + 3449 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes); 3450 3451 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes); 3452 adev->psp.sos.start_addr = ucode_array_start_addr + 3453 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes); 3454 } 3455 3456 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) { 3457 dev_warn(adev->dev, "PSP SOS FW not available"); 3458 return -EINVAL; 3459 } 3460 3461 return 0; 3462 } 3463 3464 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) 3465 { 3466 struct amdgpu_device *adev = psp->adev; 3467 const struct psp_firmware_header_v1_0 *sos_hdr; 3468 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1; 3469 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; 3470 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3471 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0; 3472 const struct psp_firmware_header_v2_1 *sos_hdr_v2_1; 3473 int fw_index, fw_bin_count, start_index = 0; 3474 const struct psp_fw_bin_desc *fw_bin; 3475 uint8_t *ucode_array_start_addr; 3476 int err = 0; 3477 3478 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos.bin", chip_name); 3479 if (err) 3480 goto out; 3481 3482 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3483 ucode_array_start_addr = (uint8_t *)sos_hdr + 3484 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3485 amdgpu_ucode_print_psp_hdr(&sos_hdr->header); 3486 3487 switch (sos_hdr->header.header_version_major) { 3488 case 1: 3489 err = psp_init_sos_base_fw(adev); 3490 if (err) 3491 goto out; 3492 3493 if (sos_hdr->header.header_version_minor == 1) { 3494 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; 3495 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes); 3496 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3497 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes); 3498 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes); 3499 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3500 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes); 3501 } 3502 if (sos_hdr->header.header_version_minor == 2) { 3503 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; 3504 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes); 3505 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3506 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes); 3507 } 3508 if (sos_hdr->header.header_version_minor == 3) { 3509 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3510 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes); 3511 adev->psp.toc.start_addr = ucode_array_start_addr + 3512 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes); 3513 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes); 3514 adev->psp.kdb.start_addr = ucode_array_start_addr + 3515 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes); 3516 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes); 3517 adev->psp.spl.start_addr = ucode_array_start_addr + 3518 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes); 3519 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes); 3520 adev->psp.rl.start_addr = ucode_array_start_addr + 3521 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes); 3522 } 3523 break; 3524 case 2: 3525 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data; 3526 3527 fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); 3528 3529 if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) { 3530 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n"); 3531 err = -EINVAL; 3532 goto out; 3533 } 3534 3535 if (sos_hdr_v2_0->header.header_version_minor == 1) { 3536 sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data; 3537 3538 fw_bin = sos_hdr_v2_1->psp_fw_bin; 3539 3540 if (psp_is_aux_sos_load_required(psp)) 3541 start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index); 3542 else 3543 fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index); 3544 3545 } else { 3546 fw_bin = sos_hdr_v2_0->psp_fw_bin; 3547 } 3548 3549 for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) { 3550 err = parse_sos_bin_descriptor(psp, fw_bin + fw_index, 3551 sos_hdr_v2_0); 3552 if (err) 3553 goto out; 3554 } 3555 break; 3556 default: 3557 dev_err(adev->dev, 3558 "unsupported psp sos firmware\n"); 3559 err = -EINVAL; 3560 goto out; 3561 } 3562 3563 return 0; 3564 out: 3565 amdgpu_ucode_release(&adev->psp.sos_fw); 3566 3567 return err; 3568 } 3569 3570 static bool is_ta_fw_applicable(struct psp_context *psp, 3571 const struct psp_fw_bin_desc *desc) 3572 { 3573 struct amdgpu_device *adev = psp->adev; 3574 uint32_t fw_version; 3575 3576 switch (desc->fw_type) { 3577 case TA_FW_TYPE_PSP_XGMI: 3578 case TA_FW_TYPE_PSP_XGMI_AUX: 3579 /* for now, AUX TA only exists on 13.0.6 ta bin, 3580 * from v20.00.0x.14 3581 */ 3582 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == 3583 IP_VERSION(13, 0, 6)) { 3584 fw_version = le32_to_cpu(desc->fw_version); 3585 3586 if (adev->flags & AMD_IS_APU && 3587 (fw_version & 0xff) >= 0x14) 3588 return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX; 3589 else 3590 return desc->fw_type == TA_FW_TYPE_PSP_XGMI; 3591 } 3592 break; 3593 default: 3594 break; 3595 } 3596 3597 return true; 3598 } 3599 3600 static int parse_ta_bin_descriptor(struct psp_context *psp, 3601 const struct psp_fw_bin_desc *desc, 3602 const struct ta_firmware_header_v2_0 *ta_hdr) 3603 { 3604 uint8_t *ucode_start_addr = NULL; 3605 3606 if (!psp || !desc || !ta_hdr) 3607 return -EINVAL; 3608 3609 if (!is_ta_fw_applicable(psp, desc)) 3610 return 0; 3611 3612 ucode_start_addr = (uint8_t *)ta_hdr + 3613 le32_to_cpu(desc->offset_bytes) + 3614 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3615 3616 switch (desc->fw_type) { 3617 case TA_FW_TYPE_PSP_ASD: 3618 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3619 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version); 3620 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3621 psp->asd_context.bin_desc.start_addr = ucode_start_addr; 3622 break; 3623 case TA_FW_TYPE_PSP_XGMI: 3624 case TA_FW_TYPE_PSP_XGMI_AUX: 3625 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3626 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3627 psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr; 3628 break; 3629 case TA_FW_TYPE_PSP_RAS: 3630 psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3631 psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3632 psp->ras_context.context.bin_desc.start_addr = ucode_start_addr; 3633 break; 3634 case TA_FW_TYPE_PSP_HDCP: 3635 psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3636 psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3637 psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr; 3638 break; 3639 case TA_FW_TYPE_PSP_DTM: 3640 psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3641 psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3642 psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr; 3643 break; 3644 case TA_FW_TYPE_PSP_RAP: 3645 psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3646 psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3647 psp->rap_context.context.bin_desc.start_addr = ucode_start_addr; 3648 break; 3649 case TA_FW_TYPE_PSP_SECUREDISPLAY: 3650 psp->securedisplay_context.context.bin_desc.fw_version = 3651 le32_to_cpu(desc->fw_version); 3652 psp->securedisplay_context.context.bin_desc.size_bytes = 3653 le32_to_cpu(desc->size_bytes); 3654 psp->securedisplay_context.context.bin_desc.start_addr = 3655 ucode_start_addr; 3656 break; 3657 default: 3658 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type); 3659 break; 3660 } 3661 3662 return 0; 3663 } 3664 3665 static int parse_ta_v1_microcode(struct psp_context *psp) 3666 { 3667 const struct ta_firmware_header_v1_0 *ta_hdr; 3668 struct amdgpu_device *adev = psp->adev; 3669 3670 ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data; 3671 3672 if (le16_to_cpu(ta_hdr->header.header_version_major) != 1) 3673 return -EINVAL; 3674 3675 adev->psp.xgmi_context.context.bin_desc.fw_version = 3676 le32_to_cpu(ta_hdr->xgmi.fw_version); 3677 adev->psp.xgmi_context.context.bin_desc.size_bytes = 3678 le32_to_cpu(ta_hdr->xgmi.size_bytes); 3679 adev->psp.xgmi_context.context.bin_desc.start_addr = 3680 (uint8_t *)ta_hdr + 3681 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3682 3683 adev->psp.ras_context.context.bin_desc.fw_version = 3684 le32_to_cpu(ta_hdr->ras.fw_version); 3685 adev->psp.ras_context.context.bin_desc.size_bytes = 3686 le32_to_cpu(ta_hdr->ras.size_bytes); 3687 adev->psp.ras_context.context.bin_desc.start_addr = 3688 (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr + 3689 le32_to_cpu(ta_hdr->ras.offset_bytes); 3690 3691 adev->psp.hdcp_context.context.bin_desc.fw_version = 3692 le32_to_cpu(ta_hdr->hdcp.fw_version); 3693 adev->psp.hdcp_context.context.bin_desc.size_bytes = 3694 le32_to_cpu(ta_hdr->hdcp.size_bytes); 3695 adev->psp.hdcp_context.context.bin_desc.start_addr = 3696 (uint8_t *)ta_hdr + 3697 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3698 3699 adev->psp.dtm_context.context.bin_desc.fw_version = 3700 le32_to_cpu(ta_hdr->dtm.fw_version); 3701 adev->psp.dtm_context.context.bin_desc.size_bytes = 3702 le32_to_cpu(ta_hdr->dtm.size_bytes); 3703 adev->psp.dtm_context.context.bin_desc.start_addr = 3704 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3705 le32_to_cpu(ta_hdr->dtm.offset_bytes); 3706 3707 adev->psp.securedisplay_context.context.bin_desc.fw_version = 3708 le32_to_cpu(ta_hdr->securedisplay.fw_version); 3709 adev->psp.securedisplay_context.context.bin_desc.size_bytes = 3710 le32_to_cpu(ta_hdr->securedisplay.size_bytes); 3711 adev->psp.securedisplay_context.context.bin_desc.start_addr = 3712 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3713 le32_to_cpu(ta_hdr->securedisplay.offset_bytes); 3714 3715 adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); 3716 3717 return 0; 3718 } 3719 3720 static int parse_ta_v2_microcode(struct psp_context *psp) 3721 { 3722 const struct ta_firmware_header_v2_0 *ta_hdr; 3723 struct amdgpu_device *adev = psp->adev; 3724 int err = 0; 3725 int ta_index = 0; 3726 3727 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data; 3728 3729 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) 3730 return -EINVAL; 3731 3732 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) { 3733 dev_err(adev->dev, "packed TA count exceeds maximum limit\n"); 3734 return -EINVAL; 3735 } 3736 3737 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) { 3738 err = parse_ta_bin_descriptor(psp, 3739 &ta_hdr->ta_fw_bin[ta_index], 3740 ta_hdr); 3741 if (err) 3742 return err; 3743 } 3744 3745 return 0; 3746 } 3747 3748 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name) 3749 { 3750 const struct common_firmware_header *hdr; 3751 struct amdgpu_device *adev = psp->adev; 3752 int err; 3753 3754 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta.bin", chip_name); 3755 if (err) 3756 return err; 3757 3758 hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data; 3759 switch (le16_to_cpu(hdr->header_version_major)) { 3760 case 1: 3761 err = parse_ta_v1_microcode(psp); 3762 break; 3763 case 2: 3764 err = parse_ta_v2_microcode(psp); 3765 break; 3766 default: 3767 dev_err(adev->dev, "unsupported TA header version\n"); 3768 err = -EINVAL; 3769 } 3770 3771 if (err) 3772 amdgpu_ucode_release(&adev->psp.ta_fw); 3773 3774 return err; 3775 } 3776 3777 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name) 3778 { 3779 struct amdgpu_device *adev = psp->adev; 3780 const struct psp_firmware_header_v1_0 *cap_hdr_v1_0; 3781 struct amdgpu_firmware_info *info = NULL; 3782 int err = 0; 3783 3784 if (!amdgpu_sriov_vf(adev)) { 3785 dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n"); 3786 return -EINVAL; 3787 } 3788 3789 err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, "amdgpu/%s_cap.bin", chip_name); 3790 if (err) { 3791 if (err == -ENODEV) { 3792 dev_warn(adev->dev, "cap microcode does not exist, skip\n"); 3793 err = 0; 3794 goto out; 3795 } 3796 dev_err(adev->dev, "fail to initialize cap microcode\n"); 3797 } 3798 3799 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP]; 3800 info->ucode_id = AMDGPU_UCODE_ID_CAP; 3801 info->fw = adev->psp.cap_fw; 3802 cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *) 3803 adev->psp.cap_fw->data; 3804 adev->firmware.fw_size += ALIGN( 3805 le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE); 3806 adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version); 3807 adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version); 3808 adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes); 3809 3810 return 0; 3811 3812 out: 3813 amdgpu_ucode_release(&adev->psp.cap_fw); 3814 return err; 3815 } 3816 3817 int psp_config_sq_perfmon(struct psp_context *psp, 3818 uint32_t xcp_id, bool core_override_enable, 3819 bool reg_override_enable, bool perfmon_override_enable) 3820 { 3821 int ret; 3822 3823 if (amdgpu_sriov_vf(psp->adev)) 3824 return 0; 3825 3826 if (xcp_id > MAX_XCP) { 3827 dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id); 3828 return -EINVAL; 3829 } 3830 3831 if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) { 3832 dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n", 3833 amdgpu_ip_version(psp->adev, MP0_HWIP, 0)); 3834 return -EINVAL; 3835 } 3836 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 3837 3838 cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON; 3839 cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id); 3840 cmd->cmd.config_sq_perfmon.core_override = core_override_enable; 3841 cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable; 3842 cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable; 3843 3844 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 3845 if (ret) 3846 dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n", 3847 xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable); 3848 3849 release_psp_cmd_buf(psp); 3850 return ret; 3851 } 3852 3853 static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block, 3854 enum amd_clockgating_state state) 3855 { 3856 return 0; 3857 } 3858 3859 static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block, 3860 enum amd_powergating_state state) 3861 { 3862 return 0; 3863 } 3864 3865 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, 3866 struct device_attribute *attr, 3867 char *buf) 3868 { 3869 struct drm_device *ddev = dev_get_drvdata(dev); 3870 struct amdgpu_device *adev = drm_to_adev(ddev); 3871 uint32_t fw_ver; 3872 int ret; 3873 3874 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 3875 dev_info(adev->dev, "PSP block is not ready yet\n."); 3876 return -EBUSY; 3877 } 3878 3879 mutex_lock(&adev->psp.mutex); 3880 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver); 3881 mutex_unlock(&adev->psp.mutex); 3882 3883 if (ret) { 3884 dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret); 3885 return ret; 3886 } 3887 3888 return sysfs_emit(buf, "%x\n", fw_ver); 3889 } 3890 3891 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, 3892 struct device_attribute *attr, 3893 const char *buf, 3894 size_t count) 3895 { 3896 struct drm_device *ddev = dev_get_drvdata(dev); 3897 struct amdgpu_device *adev = drm_to_adev(ddev); 3898 int ret, idx; 3899 const struct firmware *usbc_pd_fw; 3900 struct amdgpu_bo *fw_buf_bo = NULL; 3901 uint64_t fw_pri_mc_addr; 3902 void *fw_pri_cpu_addr; 3903 3904 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 3905 dev_err(adev->dev, "PSP block is not ready yet."); 3906 return -EBUSY; 3907 } 3908 3909 if (!drm_dev_enter(ddev, &idx)) 3910 return -ENODEV; 3911 3912 ret = amdgpu_ucode_request(adev, &usbc_pd_fw, "amdgpu/%s", buf); 3913 if (ret) 3914 goto fail; 3915 3916 /* LFB address which is aligned to 1MB boundary per PSP request */ 3917 ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000, 3918 AMDGPU_GEM_DOMAIN_VRAM | 3919 AMDGPU_GEM_DOMAIN_GTT, 3920 &fw_buf_bo, &fw_pri_mc_addr, 3921 &fw_pri_cpu_addr); 3922 if (ret) 3923 goto rel_buf; 3924 3925 memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size); 3926 3927 mutex_lock(&adev->psp.mutex); 3928 ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr); 3929 mutex_unlock(&adev->psp.mutex); 3930 3931 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr); 3932 3933 rel_buf: 3934 amdgpu_ucode_release(&usbc_pd_fw); 3935 fail: 3936 if (ret) { 3937 dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret); 3938 count = ret; 3939 } 3940 3941 drm_dev_exit(idx); 3942 return count; 3943 } 3944 3945 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size) 3946 { 3947 int idx; 3948 3949 if (!drm_dev_enter(adev_to_drm(psp->adev), &idx)) 3950 return; 3951 3952 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 3953 memcpy(psp->fw_pri_buf, start_addr, bin_size); 3954 3955 drm_dev_exit(idx); 3956 } 3957 3958 /** 3959 * DOC: usbc_pd_fw 3960 * Reading from this file will retrieve the USB-C PD firmware version. Writing to 3961 * this file will trigger the update process. 3962 */ 3963 static DEVICE_ATTR(usbc_pd_fw, 0644, 3964 psp_usbc_pd_fw_sysfs_read, 3965 psp_usbc_pd_fw_sysfs_write); 3966 3967 int is_psp_fw_valid(struct psp_bin_desc bin) 3968 { 3969 return bin.size_bytes; 3970 } 3971 3972 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj, 3973 struct bin_attribute *bin_attr, 3974 char *buffer, loff_t pos, size_t count) 3975 { 3976 struct device *dev = kobj_to_dev(kobj); 3977 struct drm_device *ddev = dev_get_drvdata(dev); 3978 struct amdgpu_device *adev = drm_to_adev(ddev); 3979 3980 adev->psp.vbflash_done = false; 3981 3982 /* Safeguard against memory drain */ 3983 if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) { 3984 dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B); 3985 kvfree(adev->psp.vbflash_tmp_buf); 3986 adev->psp.vbflash_tmp_buf = NULL; 3987 adev->psp.vbflash_image_size = 0; 3988 return -ENOMEM; 3989 } 3990 3991 /* TODO Just allocate max for now and optimize to realloc later if needed */ 3992 if (!adev->psp.vbflash_tmp_buf) { 3993 adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL); 3994 if (!adev->psp.vbflash_tmp_buf) 3995 return -ENOMEM; 3996 } 3997 3998 mutex_lock(&adev->psp.mutex); 3999 memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count); 4000 adev->psp.vbflash_image_size += count; 4001 mutex_unlock(&adev->psp.mutex); 4002 4003 dev_dbg(adev->dev, "IFWI staged for update\n"); 4004 4005 return count; 4006 } 4007 4008 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj, 4009 struct bin_attribute *bin_attr, char *buffer, 4010 loff_t pos, size_t count) 4011 { 4012 struct device *dev = kobj_to_dev(kobj); 4013 struct drm_device *ddev = dev_get_drvdata(dev); 4014 struct amdgpu_device *adev = drm_to_adev(ddev); 4015 struct amdgpu_bo *fw_buf_bo = NULL; 4016 uint64_t fw_pri_mc_addr; 4017 void *fw_pri_cpu_addr; 4018 int ret; 4019 4020 if (adev->psp.vbflash_image_size == 0) 4021 return -EINVAL; 4022 4023 dev_dbg(adev->dev, "PSP IFWI flash process initiated\n"); 4024 4025 ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size, 4026 AMDGPU_GPU_PAGE_SIZE, 4027 AMDGPU_GEM_DOMAIN_VRAM, 4028 &fw_buf_bo, 4029 &fw_pri_mc_addr, 4030 &fw_pri_cpu_addr); 4031 if (ret) 4032 goto rel_buf; 4033 4034 memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size); 4035 4036 mutex_lock(&adev->psp.mutex); 4037 ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr); 4038 mutex_unlock(&adev->psp.mutex); 4039 4040 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr); 4041 4042 rel_buf: 4043 kvfree(adev->psp.vbflash_tmp_buf); 4044 adev->psp.vbflash_tmp_buf = NULL; 4045 adev->psp.vbflash_image_size = 0; 4046 4047 if (ret) { 4048 dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret); 4049 return ret; 4050 } 4051 4052 dev_dbg(adev->dev, "PSP IFWI flash process done\n"); 4053 return 0; 4054 } 4055 4056 /** 4057 * DOC: psp_vbflash 4058 * Writing to this file will stage an IFWI for update. Reading from this file 4059 * will trigger the update process. 4060 */ 4061 static struct bin_attribute psp_vbflash_bin_attr = { 4062 .attr = {.name = "psp_vbflash", .mode = 0660}, 4063 .size = 0, 4064 .write = amdgpu_psp_vbflash_write, 4065 .read = amdgpu_psp_vbflash_read, 4066 }; 4067 4068 /** 4069 * DOC: psp_vbflash_status 4070 * The status of the flash process. 4071 * 0: IFWI flash not complete. 4072 * 1: IFWI flash complete. 4073 */ 4074 static ssize_t amdgpu_psp_vbflash_status(struct device *dev, 4075 struct device_attribute *attr, 4076 char *buf) 4077 { 4078 struct drm_device *ddev = dev_get_drvdata(dev); 4079 struct amdgpu_device *adev = drm_to_adev(ddev); 4080 uint32_t vbflash_status; 4081 4082 vbflash_status = psp_vbflash_status(&adev->psp); 4083 if (!adev->psp.vbflash_done) 4084 vbflash_status = 0; 4085 else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000)) 4086 vbflash_status = 1; 4087 4088 return sysfs_emit(buf, "0x%x\n", vbflash_status); 4089 } 4090 static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL); 4091 4092 static struct bin_attribute *bin_flash_attrs[] = { 4093 &psp_vbflash_bin_attr, 4094 NULL 4095 }; 4096 4097 static struct attribute *flash_attrs[] = { 4098 &dev_attr_psp_vbflash_status.attr, 4099 &dev_attr_usbc_pd_fw.attr, 4100 NULL 4101 }; 4102 4103 static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx) 4104 { 4105 struct device *dev = kobj_to_dev(kobj); 4106 struct drm_device *ddev = dev_get_drvdata(dev); 4107 struct amdgpu_device *adev = drm_to_adev(ddev); 4108 4109 if (attr == &dev_attr_usbc_pd_fw.attr) 4110 return adev->psp.sup_pd_fw_up ? 0660 : 0; 4111 4112 return adev->psp.sup_ifwi_up ? 0440 : 0; 4113 } 4114 4115 static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj, 4116 const struct bin_attribute *attr, 4117 int idx) 4118 { 4119 struct device *dev = kobj_to_dev(kobj); 4120 struct drm_device *ddev = dev_get_drvdata(dev); 4121 struct amdgpu_device *adev = drm_to_adev(ddev); 4122 4123 return adev->psp.sup_ifwi_up ? 0660 : 0; 4124 } 4125 4126 const struct attribute_group amdgpu_flash_attr_group = { 4127 .attrs = flash_attrs, 4128 .bin_attrs = bin_flash_attrs, 4129 .is_bin_visible = amdgpu_bin_flash_attr_is_visible, 4130 .is_visible = amdgpu_flash_attr_is_visible, 4131 }; 4132 4133 const struct amd_ip_funcs psp_ip_funcs = { 4134 .name = "psp", 4135 .early_init = psp_early_init, 4136 .sw_init = psp_sw_init, 4137 .sw_fini = psp_sw_fini, 4138 .hw_init = psp_hw_init, 4139 .hw_fini = psp_hw_fini, 4140 .suspend = psp_suspend, 4141 .resume = psp_resume, 4142 .set_clockgating_state = psp_set_clockgating_state, 4143 .set_powergating_state = psp_set_powergating_state, 4144 }; 4145 4146 const struct amdgpu_ip_block_version psp_v3_1_ip_block = { 4147 .type = AMD_IP_BLOCK_TYPE_PSP, 4148 .major = 3, 4149 .minor = 1, 4150 .rev = 0, 4151 .funcs = &psp_ip_funcs, 4152 }; 4153 4154 const struct amdgpu_ip_block_version psp_v10_0_ip_block = { 4155 .type = AMD_IP_BLOCK_TYPE_PSP, 4156 .major = 10, 4157 .minor = 0, 4158 .rev = 0, 4159 .funcs = &psp_ip_funcs, 4160 }; 4161 4162 const struct amdgpu_ip_block_version psp_v11_0_ip_block = { 4163 .type = AMD_IP_BLOCK_TYPE_PSP, 4164 .major = 11, 4165 .minor = 0, 4166 .rev = 0, 4167 .funcs = &psp_ip_funcs, 4168 }; 4169 4170 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = { 4171 .type = AMD_IP_BLOCK_TYPE_PSP, 4172 .major = 11, 4173 .minor = 0, 4174 .rev = 8, 4175 .funcs = &psp_ip_funcs, 4176 }; 4177 4178 const struct amdgpu_ip_block_version psp_v12_0_ip_block = { 4179 .type = AMD_IP_BLOCK_TYPE_PSP, 4180 .major = 12, 4181 .minor = 0, 4182 .rev = 0, 4183 .funcs = &psp_ip_funcs, 4184 }; 4185 4186 const struct amdgpu_ip_block_version psp_v13_0_ip_block = { 4187 .type = AMD_IP_BLOCK_TYPE_PSP, 4188 .major = 13, 4189 .minor = 0, 4190 .rev = 0, 4191 .funcs = &psp_ip_funcs, 4192 }; 4193 4194 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = { 4195 .type = AMD_IP_BLOCK_TYPE_PSP, 4196 .major = 13, 4197 .minor = 0, 4198 .rev = 4, 4199 .funcs = &psp_ip_funcs, 4200 }; 4201 4202 const struct amdgpu_ip_block_version psp_v14_0_ip_block = { 4203 .type = AMD_IP_BLOCK_TYPE_PSP, 4204 .major = 14, 4205 .minor = 0, 4206 .rev = 0, 4207 .funcs = &psp_ip_funcs, 4208 }; 4209