1 /* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Author: Huang Rui
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "amdgpu_xgmi.h"
#include "soc15_common.h"
#include "psp_v3_1.h"
#include "psp_v10_0.h"
#include "psp_v11_0.h"
#include "psp_v11_0_8.h"
#include "psp_v12_0.h"
#include "psp_v13_0.h"
#include "psp_v13_0_4.h"
#include "psp_v14_0.h"
#include "psp_v15_0.h"
#include "psp_v15_0_8.h"

#include "amdgpu_ras.h"
#include "amdgpu_securedisplay.h"
#include "amdgpu_atomfirmware.h"

/* Maximum accepted size for a VBIOS image file: 16 MiB */
#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*16)

static int psp_load_smu_fw(struct psp_context *psp);
static int psp_rap_terminate(struct psp_context *psp);
static int psp_securedisplay_terminate(struct psp_context *psp);

/*
 * psp_ring_init - allocate the PSP kernel-mode (KM) submission ring.
 *
 * Creates a single 4K page for the ring buffer, preferring VRAM but
 * falling back to GTT.  On allocation failure ring_size is reset to 0
 * so later teardown treats the ring as absent.
 *
 * Returns 0 on success or the amdgpu_bo_create_kernel() error code.
 */
static int psp_ring_init(struct psp_context *psp,
			 enum psp_ring_type ring_type)
{
	int ret = 0;
	struct psp_ring *ring;
	struct amdgpu_device *adev = psp->adev;

	ring = &psp->km_ring;

	ring->ring_type = ring_type;

	/* allocate 4k Page of Local Frame Buffer memory for ring */
	ring->ring_size = 0x1000;
	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->firmware.rbuf,
				      &ring->ring_mem_mc_addr,
				      (void **)&ring->ring_mem);
	if (ret) {
		/* mark the ring as unusable so teardown is a no-op */
		ring->ring_size = 0;
		return ret;
	}

	return 0;
}

/*
 * Due to DF Cstate management centralized to PMFW, the firmware
 * loading sequence will be updated as below:
 * - Load KDB
 * - Load SYS_DRV
 * - Load tOS
 * - Load PMFW
 * - Setup TMR
 * - Load other non-psp fw
 * - Load ASD
 * - Load XGMI/RAS/HDCP/DTM TA if any
 *
 * This new sequence is required for
 * - Arcturus and onwards
 */
static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;

	/* Under SRIOV the host driver owns this; the VF never enables it. */
	if (amdgpu_sriov_vf(adev)) {
		psp->pmfw_centralized_cstate_management = false;
		return;
	}

	/* Only the MP0 (PSP) IP versions listed below use the new sequence. */
	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 7):
		psp->pmfw_centralized_cstate_management = true;
		break;
	default:
		psp->pmfw_centralized_cstate_management = false;
		break;
	}
}

/*
 * psp_init_sriov_microcode - request the microcode images needed when
 * running as an SRIOV virtual function.
 *
 * Decodes the ucode filename prefix for this MP0 version, then requests
 * the CAP and/or TA microcode per ASIC and records which ucode ID marks
 * the end of autoload, if any.
 *
 * Returns 0 on success, -EINVAL for unhandled MP0 versions, or an error
 * code from the microcode init helpers.
 */
static int psp_init_sriov_microcode(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;
	char ucode_prefix[30];
	int ret = 0;

	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 9):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 2):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		/* NOTE(review): bitwise-AND of two error codes can mask a
		 * failure (0 & -E... == 0) or merge two distinct negative
		 * codes into a meaningless value; presumably intentional
		 * best-effort handling -- confirm against the original
		 * change before relying on the returned value.
		 */
		ret &= psp_init_ta_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 0):
		/* 0 means no autoload terminator ucode for this ASIC */
		adev->virt.autoload_ucode_id = 0;
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 14):
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		ret &= psp_init_ta_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 10):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 12):
		ret = psp_init_ta_microcode(psp, ucode_prefix);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

static int
psp_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct psp_context *psp = &adev->psp;

	/* Optimistic defaults; the per-ASIC cases below clear them where
	 * the hardware/firmware does not support the feature.
	 */
	psp->autoload_supported = true;
	psp->boot_time_tmr = true;

	/* Bind the per-ASIC PSP function table and capability flags by
	 * MP0 (PSP) IP version.  Note the deliberate fallthroughs.
	 */
	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
		psp_v3_1_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		psp_v10_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
		psp_v11_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 7):
		/* PD (power-down) firmware update only on bare metal */
		adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
		fallthrough;
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		psp_v11_0_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		psp_v12_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(13, 0, 2):
		psp->boot_time_tmr = false;
		fallthrough;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 14):
		psp_v13_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case IP_VERSION(13, 0, 12):
		psp_v13_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		/* IFWI update only supported on bare metal */
		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
	case IP_VERSION(14, 0, 4):
		psp_v13_0_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 8):
		/* Only the Cyan Skillfish 2 variant has PSP support here */
		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
			psp_v11_0_8_set_psp_funcs(psp);
		}
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 10):
		psp_v13_0_set_psp_funcs(psp);
		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(13, 0, 4):
		psp_v13_0_4_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
		psp_v14_0_set_psp_funcs(psp);
		break;
	case IP_VERSION(14, 0, 5):
		psp_v14_0_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(15, 0, 0):
		psp_v15_0_0_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(15, 0, 8):
		psp_v15_0_8_set_psp_funcs(psp);
		break;
	default:
		return -EINVAL;
	}

	psp->adev = adev;

	/* default PSP command timeout, in milliseconds */
	adev->psp_timeout = 20000;

	psp_check_pmfw_centralized_cstate_management(psp);

	if (amdgpu_sriov_vf(adev))
		return psp_init_sriov_microcode(psp);
	else
		return psp_init_microcode(psp);
}

/*
 * psp_ta_free_shared_buf - release a TA's driver/PSP shared buffer and
 * clear the BO pointer so a double free is harmless.
 */
void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
{
	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
			      &mem_ctx->shared_buf);
	mem_ctx->shared_bo = NULL;
}

/*
 * psp_free_shared_bufs - tear down the TMR and every TA shared buffer.
 *
 * Under SRIOV the TMR BO was created with a CPU mapping, so a kernel
 * address pointer must be passed to amdgpu_bo_free_kernel() to unmap it;
 * on bare metal NULL is passed instead.
 */
static void psp_free_shared_bufs(struct psp_context *psp)
{
	void *tmr_buf;
	void **pptr;

	/* free TMR memory buffer */
	pptr = amdgpu_sriov_vf(psp->adev) ?
	       &tmr_buf : NULL;
	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
	psp->tmr_bo = NULL;

	/* free xgmi shared memory */
	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);

	/* free ras shared memory */
	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);

	/* free hdcp shared memory */
	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);

	/* free dtm shared memory */
	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);

	/* free rap shared memory */
	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);

	/* free securedisplay shared memory */
	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);


}

/*
 * psp_memory_training_fini - release the system-memory cache used by
 * two-stage DRAM training and mark training unsupported.
 */
static void psp_memory_training_fini(struct psp_context *psp)
{
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	kfree(ctx->sys_cache);
	ctx->sys_cache = NULL;
}

/*
 * psp_memory_training_init - allocate the system-memory cache that holds
 * training data across suspend/resume.
 *
 * A prior stage must have reserved the VRAM side successfully
 * (PSP_MEM_TRAIN_RESERVE_SUCCESS); otherwise this is a supported no-op.
 *
 * Returns 0 on success (or when unsupported), -ENOMEM on allocation
 * failure (after undoing partial init).
 */
static int psp_memory_training_init(struct psp_context *psp)
{
	int ret;
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
		dev_dbg(psp->adev->dev, "memory training is not supported!\n");
		return 0;
	}

	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
	if (ctx->sys_cache == NULL) {
		dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
		ret = -ENOMEM;
		goto Err_out;
	}

	dev_dbg(psp->adev->dev,
		"train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
		ctx->train_data_size,
		ctx->p2c_train_data_offset,
		ctx->c2p_train_data_offset);
	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
	return 0;

Err_out:
	psp_memory_training_fini(psp);
	return ret;
}

/*
 * Helper function to query psp runtime database entry
 *
 * @adev: amdgpu_device pointer
 * @entry_type: the type of psp runtime database entry
 * @db_entry: runtime database entry pointer
 *
 * Return false if runtime database doesn't exist or entry is invalid
 * or true if the specific database entry is found, and copy to @db_entry
 */
static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
				     enum psp_runtime_entry_type entry_type,
				     void *db_entry)
{
	uint64_t db_header_pos, db_dir_pos;
	struct psp_runtime_data_header db_header = {0};
	struct psp_runtime_data_directory db_dir = {0};
	bool ret = false;
	int i;

	/* These MP0 versions do not carry a runtime database in VRAM */
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
		return false;

	/* The database lives at a fixed offset from the end of VRAM */
	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);

	/* read runtime db header from vram */
	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
				  sizeof(struct psp_runtime_data_header), false);

	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
		/* runtime db doesn't exist, exit */
		dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
		return false;
	}

	/* read runtime database entry from vram */
	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
				  sizeof(struct psp_runtime_data_directory), false);

	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
		/* invalid db entry count, exit */
		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
		return false;
	}

	/* look up for requested entry type */
	for (i = 0; i < db_dir.entry_count && !ret; i++) {
		if (db_dir.entry_list[i].entry_type == entry_type) {
			switch (entry_type) {
			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
					/* invalid db entry size */
					dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
					return false;
				}
				/* read runtime database entry */
				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
				ret = true;
				break;
			case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
					/* invalid db entry size */
					dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
					return false;
				}
				/* read runtime database entry */
				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
				ret = true;
				break;
			default:
				ret = false;
				break;
			}
		}
	}

	return ret;
}

/*
 * psp_sw_init - software-side PSP setup: command buffer allocation,
 * runtime-database queries (SCPM status, boot config), optional memory
 * training, and the firmware/fence/command GPU buffers.
 *
 * Returns 0 on success or a negative error code; partially created
 * buffers are freed via the goto-cleanup chain at the end.
 */
static int psp_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct psp_context *psp = &adev->psp;
	int ret;
	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
	struct psp_runtime_scpm_entry scpm_entry;

	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!psp->cmd) {
		dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
		return -ENOMEM;
	}

	/* Extended XGMI topology data only applies to dGPU Aldebaran */
	adev->psp.xgmi_context.supports_extended_data =
		!adev->gmc.xgmi.connected_to_cpu &&
		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);

	memset(&scpm_entry, 0, sizeof(scpm_entry));
	if ((psp_get_runtime_db_entry(adev,
				      PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
				      &scpm_entry)) &&
	    (scpm_entry.scpm_status != SCPM_DISABLE)) {
		adev->scpm_enabled = true;
		adev->scpm_status = scpm_entry.scpm_status;
	} else {
		adev->scpm_enabled = false;
		adev->scpm_status = SCPM_DISABLE;
	}

	/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */

	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
	if (psp_get_runtime_db_entry(adev,
				     PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
				     &boot_cfg_entry)) {
		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
		if ((psp->boot_cfg_bitmask) &
		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
			/* If psp runtime database exists, then
			 * only enable two stage memory training
			 * when TWO_STAGE_DRAM_TRAINING bit is set
			 * in runtime database
			 */
			mem_training_ctx->enable_mem_training = true;
		}

	} else {
		/* If psp runtime database doesn't exist or is
		 * invalid, force enable two stage memory training
		 */
		mem_training_ctx->enable_mem_training = true;
	}

	if (mem_training_ctx->enable_mem_training) {
		ret = psp_memory_training_init(psp);
		if (ret) {
			dev_err(adev->dev, "Failed to initialize memory training!\n");
			return ret;
		}

		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
		if (ret) {
			dev_err(adev->dev, "Failed to process memory training!\n");
			return ret;
		}
	}

	/* Firmware staging buffer: VRAM under SRIOV or when debugging
	 * forces VRAM, otherwise GTT.
	 */
	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
				      (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
				      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fw_pri_bo,
				      &psp->fw_pri_mc_addr,
				      &psp->fw_pri_buf);
	if (ret)
		return ret;

	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fence_buf_bo,
				      &psp->fence_buf_mc_addr,
				      &psp->fence_buf);
	if (ret)
		goto failed1;

	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
				      (void **)&psp->cmd_buf_mem);
	if (ret)
		goto failed2;

	return 0;

	/* unwind in reverse order of creation */
failed2:
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
failed1:
	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	return ret;
}

/*
 * psp_sw_fini - release everything psp_sw_init() (and ring/TA setup)
 * created: memory-training cache, firmware handles, command memory,
 * shared buffers, KM ring, and the staging/fence/command BOs.
 */
static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct psp_context *psp = &adev->psp;

	psp_memory_training_fini(psp);

	amdgpu_ucode_release(&psp->sos_fw);
	amdgpu_ucode_release(&psp->asd_fw);
	amdgpu_ucode_release(&psp->ta_fw);
	amdgpu_ucode_release(&psp->cap_fw);
	amdgpu_ucode_release(&psp->toc_fw);

	kfree(psp->cmd);
	psp->cmd = NULL;

	psp_free_shared_bufs(psp);

	if (psp->km_ring.ring_mem)
		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
				      &psp->km_ring.ring_mem_mc_addr,
				      (void **)&psp->km_ring.ring_mem);

	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
			      (void **)&psp->cmd_buf_mem);

	return 0;
}

/*
 * psp_wait_for - poll a PSP register until a condition holds.
 *
 * With PSP_WAITREG_CHANGED set, waits for the value to differ from
 * @reg_val; otherwise waits for (value & @mask) == @reg_val.  Polls
 * once per microsecond up to adev->usec_timeout.
 *
 * Returns 0 when the condition is met, -ETIME on timeout (logged
 * unless PSP_WAITREG_NOVERBOSE is set).
 */
int psp_wait_for(struct psp_context *psp, uint32_t reg_index, uint32_t reg_val,
		 uint32_t mask, uint32_t flags)
{
	bool check_changed = flags &
			     PSP_WAITREG_CHANGED;
	bool verbose = !(flags & PSP_WAITREG_NOVERBOSE);
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	/* nothing to poll when hardware access is disabled (e.g. emulation) */
	if (psp->adev->no_hw_access)
		return 0;

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32(reg_index);
		if (check_changed) {
			if (val != reg_val)
				return 0;
		} else {
			if ((val & mask) == reg_val)
				return 0;
		}
		udelay(1);
	}

	if (verbose)
		dev_err(adev->dev,
			"psp reg (0x%x) wait timed out, mask: %x, read: %x exp: %x",
			reg_index, mask, val, reg_val);

	return -ETIME;
}

/*
 * psp_wait_for_spirom_update - poll a PSP register for a SPI-ROM update
 * to complete, sleeping 1 ms between reads for up to @msec_timeout ms.
 *
 * Returns 0 when (value & @mask) == @reg_val, -ETIME on timeout.
 */
int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
			       uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
{
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	if (psp->adev->no_hw_access)
		return 0;

	for (i = 0; i < msec_timeout; i++) {
		val = RREG32(reg_index);
		if ((val & mask) == reg_val)
			return 0;
		msleep(1);
	}

	return -ETIME;
}

/* Map a PSP GFX command ID to a human-readable name for diagnostics. */
static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
{
	switch (cmd_id) {
	case GFX_CMD_ID_LOAD_TA:
		return "LOAD_TA";
	case GFX_CMD_ID_UNLOAD_TA:
		return "UNLOAD_TA";
	case GFX_CMD_ID_INVOKE_CMD:
		return "INVOKE_CMD";
	case GFX_CMD_ID_LOAD_ASD:
		return "LOAD_ASD";
	case GFX_CMD_ID_SETUP_TMR:
		return "SETUP_TMR";
	case GFX_CMD_ID_LOAD_IP_FW:
		return "LOAD_IP_FW";
	case GFX_CMD_ID_DESTROY_TMR:
		return "DESTROY_TMR";
	case GFX_CMD_ID_SAVE_RESTORE:
		return "SAVE_RESTORE_IP_FW";
	case GFX_CMD_ID_SETUP_VMR:
		return "SETUP_VMR";
	case GFX_CMD_ID_DESTROY_VMR:
		return "DESTROY_VMR";
	case GFX_CMD_ID_PROG_REG:
		return "PROG_REG";
	case GFX_CMD_ID_GET_FW_ATTESTATION:
		return "GET_FW_ATTESTATION";
	case GFX_CMD_ID_LOAD_TOC:
		return "ID_LOAD_TOC";
	case GFX_CMD_ID_AUTOLOAD_RLC:
		return "AUTOLOAD_RLC";
	case GFX_CMD_ID_BOOT_CFG:
		return "BOOT_CFG";
	case GFX_CMD_ID_CONFIG_SQ_PERFMON:
		return "CONFIG_SQ_PERFMON";
	case GFX_CMD_ID_FB_FW_RESERV_ADDR:
		return "FB_FW_RESERV_ADDR";
	case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR:
		return "FB_FW_RESERV_EXT_ADDR";
	case GFX_CMD_ID_SRIOV_SPATIAL_PART:
		return "SPATIAL_PARTITION";
	case GFX_CMD_ID_FB_NPS_MODE:
		return "NPS_MODE_CHANGE";
	default:
		return "UNKNOWN CMD";
	}
}

/*
 * psp_err_warn - decide whether a failed PSP response deserves a warning.
 *
 * Returns false only for the known-benign Aldebaran case where loading
 * the register list reports TEE_ERROR_CANCEL because it is already
 * loaded; true for everything else.
 */
static bool psp_err_warn(struct psp_context *psp)
{
	struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;

	/* This response indicates reg list is already loaded */
	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
	    cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
	    cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
	    cmd->resp.status == TEE_ERROR_CANCEL)
		return false;

	return true;
}

/*
 * psp_cmd_submit_buf - copy @cmd into the shared command buffer, submit
 * it on the KM ring, and busy-wait on the fence buffer for completion.
 *
 * The response is copied back into @cmd->resp.  Non-zero response
 * status is only warned about (some PSP firmware never clears it),
 * except under SRIOV firmware loads or on timeout, where -EINVAL is
 * returned.  Must be called with the psp mutex held (see
 * acquire_psp_cmd_buf()).
 */
static int
psp_cmd_submit_buf(struct psp_context *psp,
		   struct amdgpu_firmware_info *ucode,
		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
	int ret;
	int index;
	int timeout = psp->adev->psp_timeout;
	bool ras_intr = false;
	bool skip_unsupport = false;

	if (psp->adev->no_hw_access)
		return 0;

	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);

	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));

	index = atomic_inc_return(&psp->fence_value);
	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
	if (ret) {
		atomic_dec(&psp->fence_value);
		goto exit;
	}

	amdgpu_device_invalidate_hdp(psp->adev, NULL);
	while (*((unsigned int *)psp->fence_buf) != index) {
		if (--timeout == 0)
			break;
		/*
		 * Shouldn't wait for timeout when err_event_athub occurs,
		 * because gpu reset thread triggered and lock resource should
		 * be released for psp resume sequence.
		 */
		ras_intr = amdgpu_ras_intr_triggered();
		if (ras_intr)
			break;
		usleep_range(10, 100);
		amdgpu_device_invalidate_hdp(psp->adev, NULL);
	}

	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
			  psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);

	memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));

	/* In some cases, psp response status is not 0 even there is no
	 * problem while the command is submitted. Some version of PSP FW
	 * doesn't write 0 to that field.
	 * So here we would like to only print a warning instead of an error
	 * during psp initialization to avoid breaking hw_init and it doesn't
	 * return -EINVAL.
	 */
	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
		if (ucode)
			dev_warn(psp->adev->dev,
				 "failed to load ucode %s(0x%X) ",
				 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
		if (psp_err_warn(psp))
			dev_warn(
				psp->adev->dev,
				"psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
				psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
				psp->cmd_buf_mem->cmd_id,
				psp->cmd_buf_mem->resp.status);
		/* If any firmware (including CAP) load fails under SRIOV, it should
		 * return failure to stop the VF from initializing.
		 * Also return failure in case of timeout
		 */
		if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
			ret = -EINVAL;
			goto exit;
		}
	}

	/* report back where the firmware was placed inside the TMR */
	if (ucode) {
		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
	}

exit:
	return ret;
}

/*
 * acquire_psp_cmd_buf - take the psp mutex and return the zeroed,
 * reusable command buffer.  Pair with release_psp_cmd_buf().
 */
static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
{
	struct psp_gfx_cmd_resp *cmd = psp->cmd;

	mutex_lock(&psp->mutex);

	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));

	return cmd;
}

/* release_psp_cmd_buf - drop the psp mutex taken by acquire_psp_cmd_buf() */
static void release_psp_cmd_buf(struct psp_context *psp)
{
	mutex_unlock(&psp->mutex);
}

/*
 * psp_prep_tmr_cmd_buf - fill @cmd with a SETUP_TMR (bare metal) or
 * SETUP_VMR (SRIOV) request describing the TMR buffer at @tmr_mc.
 * When @tmr_bo is NULL, size and physical address are reported as 0.
 */
static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
				 struct psp_gfx_cmd_resp *cmd,
				 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
{
	struct amdgpu_device *adev = psp->adev;
	uint32_t size = 0;
	uint64_t tmr_pa = 0;

	if (tmr_bo) {
		size = amdgpu_bo_size(tmr_bo);
		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
	}

	if (amdgpu_sriov_vf(psp->adev))
		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_size = size;
	cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
	cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
	cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
}

/* Fill @cmd with a LOAD_TOC request for the TOC staged at @pri_buf_mc. */
static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint64_t pri_buf_mc, uint32_t size)
{
	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_size = size;
}

/* Issue LOAD TOC cmd to PSP to part toc and calculate tmr size
 * needed */
static int psp_load_toc(struct psp_context *psp,
			uint32_t *tmr_size)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	/* Copy toc to psp firmware private buffer */
	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);

	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	/* on success PSP reports how large the TMR must be */
	if (!ret)
		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;

	release_psp_cmd_buf(psp);

	return ret;
}

/* Set up Trusted Memory Region */
static int psp_tmr_init(struct psp_context *psp)
{
	int ret = 0;
	/* NOTE(review): declared int but passed to psp_load_toc() which
	 * takes uint32_t * -- relies on matching representation; confirm
	 * against the upstream definition.
	 */
	int tmr_size;
	void *tmr_buf;
	void **pptr;

	/*
	 * According to HW engineer, they prefer the TMR address be "naturally
	 * aligned" , e.g. the start address be an integer divide of TMR size.
	 *
	 * Note: this memory need be reserved till the driver
	 * uninitializes.
	 */
	tmr_size = PSP_TMR_SIZE(psp->adev);

	/* For ASICs support RLC autoload, psp will parse the toc
	 * and calculate the total size of TMR needed
	 */
	if (!amdgpu_sriov_vf(psp->adev) &&
	    psp->toc.start_addr &&
	    psp->toc.size_bytes &&
	    psp->fw_pri_buf) {
		ret = psp_load_toc(psp, &tmr_size);
		if (ret) {
			dev_err(psp->adev->dev, "Failed to load toc\n");
			return ret;
		}
	}

	/* Skip allocation when PSP set the TMR up at boot time */
	if (!psp->tmr_bo && !psp->boot_time_tmr) {
		/* SRIOV needs a CPU mapping of the TMR BO */
		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
					      PSP_TMR_ALIGNMENT,
					      AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM,
					      &psp->tmr_bo, &psp->tmr_mc_addr,
					      pptr);
	}
	/* With XGMI migration the framebuffer aperture address is used */
	if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo)
		psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo);

	return ret;
}

/*
 * psp_skip_tmr - ASICs whose host driver already sets the TMR up for
 * the VF, so the guest must not issue SETUP/DESTROY TMR itself.
 */
static bool psp_skip_tmr(struct psp_context *psp)
{
	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 12):
	case IP_VERSION(13, 0, 14):
	case IP_VERSION(15, 0, 0):
	case IP_VERSION(15, 0, 8):
		return true;
	default:
		return false;
	}
}

/* Submit the SETUP_TMR/SETUP_VMR command describing the reserved TMR. */
static int psp_tmr_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
	 * Already set up by host driver.
	 */
	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
	if (psp->tmr_bo)
		dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

/* Fill @cmd with DESTROY_VMR (SRIOV) or DESTROY_TMR (bare metal). */
static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
					struct psp_gfx_cmd_resp *cmd)
{
	if (amdgpu_sriov_vf(psp->adev))
		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
}

/* Ask PSP to release the TMR; mirrors the skip logic of psp_tmr_load(). */
static int psp_tmr_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
	 * as TMR is not loaded at all
	 */
	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_tmr_unload_cmd_buf(psp, cmd);
	dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

/* Thin wrapper kept for symmetry with the other *_terminate helpers. */
static int psp_tmr_terminate(struct psp_context *psp)
{
	return psp_tmr_unload(psp);
}

/*
 * psp_get_fw_attestation_records_addr - query the 64-bit address of the
 * firmware attestation database from PSP.
 *
 * No-op (returns 0 without writing @output_ptr) under SRIOV; -EINVAL
 * when @output_ptr is NULL.
 */
int psp_get_fw_attestation_records_addr(struct psp_context *psp,
					uint64_t *output_ptr)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	if (!output_ptr)
		return -EINVAL;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	if (!ret) {
		/* combine lo/hi halves into the 64-bit database address */
		*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
			      ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
	}

	release_psp_cmd_buf(psp);

	return ret;
}

/*
 * psp_get_fw_reservation_info - query base address and size of a PSP
 * firmware reservation region via @cmd_id (normal or extended).
 *
 * If the firmware does not know the command, reports success with
 * @addr/@size set to 0 so callers can treat the region as absent.
 */
static int psp_get_fw_reservation_info(struct psp_context *psp,
				       uint32_t cmd_id,
				       uint64_t *addr,
				       uint32_t *size)
{
	int ret;
	uint32_t status;
	struct psp_gfx_cmd_resp *cmd;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = cmd_id;

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (ret) {
		release_psp_cmd_buf(psp);
		return ret;
	}

	status = cmd->resp.status;
	if (status == PSP_ERR_UNKNOWN_COMMAND) {
		release_psp_cmd_buf(psp);
		*addr = 0;
		*size = 0;
		return 0;
	}

	*addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 |
		cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo;
	*size = cmd->resp.uresp.fw_reserve_info.reserve_size;

	release_psp_cmd_buf(psp);

	return 0;
}

/*
 * psp_update_fw_reservation - re-reserve the VRAM regions PSP firmware
 * claims for itself, replacing the driver's provisional reservation.
 *
 * Only applies to specific MP0 14.0.x versions with new-enough SOS
 * firmware; everything else (including SRIOV) returns 0 untouched.
 * The primary region is validated to sit at the top of VRAM before the
 * old reservation is dropped and both regions are re-created rounded
 * up to 1 MiB.
 */
int psp_update_fw_reservation(struct psp_context *psp)
{
	int ret;
	uint64_t reserv_addr, reserv_addr_ext;
	uint32_t reserv_size, reserv_size_ext, mp0_ip_ver;
	struct amdgpu_device *adev = psp->adev;

	mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0);

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	/* minimum SOS firmware versions that implement the query */
	switch (mp0_ip_ver) {
	case IP_VERSION(14, 0, 2):
		if (adev->psp.sos.fw_version < 0x3b0e0d)
			return 0;
		break;

	case IP_VERSION(14, 0, 3):
		if (adev->psp.sos.fw_version < 0x3a0e14)
			return 0;
		break;

	default:
		return 0;
	}

	ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
	if (ret)
		return ret;
	ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext);
	if (ret)
		return ret;

	/* the region must be flush against the end of visible VRAM */
	if (reserv_addr != adev->gmc.real_vram_size - reserv_size) {
		dev_warn(adev->dev, "reserve fw region is not valid!\n");
		return 0;
	}

	amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);

	reserv_size = roundup(reserv_size, SZ_1M);

	ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL);
	if (ret) {
		dev_err(adev->dev, "reserve fw region failed(%d)!\n", ret);
		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
		return ret;
	}

	reserv_size_ext = roundup(reserv_size_ext, SZ_1M);

	ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext,
					 &adev->mman.fw_reserved_memory_extend, NULL);
	if (ret) {
		dev_err(adev->dev, "reserve extend fw region failed(%d)!\n", ret);
		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL);
		return ret;
	}

	return 0;
}

/*
 * psp_boot_config_get - read the boot config word from PSP and report
 * whether GECC (GDDR error correction) is enabled (1) or not (0).
 * No-op returning 0 under SRIOV.
 */
static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
{
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (!ret) {
		*boot_cfg =
			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ?
			1 : 0;
	}

	release_psp_cmd_buf(psp);

	return ret;
}

/* Program the PSP boot config; the value written doubles as the valid mask. */
static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
{
	int ret;
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;

	if (amdgpu_sriov_vf(adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
	cmd->cmd.boot_cfg.boot_config = boot_cfg;
	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

/* Stage the register-list blob in the PSP private buffer and load it as IP fw. */
static int psp_rl_load(struct amdgpu_device *adev)
{
	int ret;
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;

	/* the reg list ucode is optional */
	if (!is_psp_fw_valid(psp->rl))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);

	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
	cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

/* Request an NPS (memory partition) mode change from PSP firmware. */
int psp_memory_partition(struct psp_context *psp, int mode)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE;
	cmd->cmd.cmd_memory_part.mode = mode;

	dev_info(psp->adev->dev,
		 "Requesting %d memory partition change through PSP", mode);
	ret = psp_cmd_submit_buf(psp,
				 NULL, cmd, psp->fence_buf_mc_addr);
	if (ret)
		dev_err(psp->adev->dev,
			"PSP request failed to change to NPS%d mode\n", mode);

	release_psp_cmd_buf(psp);

	return ret;
}

/* Request a spatial (compute) partition change from PSP firmware. */
int psp_spatial_partition(struct psp_context *psp, int mode)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
	cmd->cmd.cmd_spatial_part.mode = mode;

	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

/* Load the ASD firmware when present and applicable to this configuration. */
static int psp_asd_initialize(struct psp_context *psp)
{
	int ret;

	/* If PSP version doesn't match ASD version, asd loading will be failed.
	 * add workaround to bypass it for sriov now.
	 * TODO: add version check to make it common
	 */
	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
		return 0;

	/* bypass asd if display hardware is not available */
	if (!amdgpu_device_has_display_hardware(psp->adev) &&
	    amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
		return 0;

	/* ASD uses no TA<->host shared buffer, only a size for the load cmd */
	psp->asd_context.mem_context.shared_mc_addr = 0;
	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
	psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;

	ret = psp_ta_load(psp, &psp->asd_context);
	if (!ret)
		psp->asd_context.initialized = true;

	return ret;
}

/* Fill an UNLOAD_TA command for the given TA session. */
static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
	cmd->cmd.cmd_unload_ta.session_id = session_id;
}

/* Unload a previously loaded TA; the TA response status is saved for the caller. */
int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
{
	int ret;
	struct
		psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	/* keep the firmware's own status even when the submit succeeded */
	context->resp_status = cmd->resp.status;

	release_psp_cmd_buf(psp);

	return ret;
}

/* Unload ASD if it was loaded; skipped entirely under SR-IOV. */
static int psp_asd_terminate(struct psp_context *psp)
{
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->asd_context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->asd_context);
	if (!ret)
		psp->asd_context.initialized = false;

	return ret;
}

/* Fill a PROG_REG command (write a PSP-owned register selected by id). */
static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint32_t id, uint32_t value)
{
	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
}

/* Program one of the PSP-managed registers; reg must be below PSP_REG_LAST. */
int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
		    uint32_t value)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret = 0;

	if (reg >= PSP_REG_LAST)
		return -EINVAL;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (ret)
		dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);

	release_psp_cmd_buf(psp);

	return ret;
}

/* Fill a LOAD_TA-style command: TA binary address/length plus the shared buffer. */
static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				     uint64_t ta_bin_mc,
				     struct ta_context *context)
{
	cmd->cmd_id = context->ta_load_type;
	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;

	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
		lower_32_bits(context->mem_context.shared_mc_addr);
	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
		upper_32_bits(context->mem_context.shared_mc_addr);
	cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
}

/*
 * Allocate the TA<->host shared buffer (size from mem_ctx->shared_mem_size,
 * page aligned) from VRAM with a GTT fallback.
 */
int psp_ta_init_shared_buf(struct psp_context *psp,
			   struct ta_mem_context *mem_ctx)
{
	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for ta to host memory
	 */
	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT,
				       &mem_ctx->shared_bo,
				       &mem_ctx->shared_mc_addr,
				       &mem_ctx->shared_buf);
}

/* Fill an INVOKE_CMD targeting an already-loaded TA session. */
static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t ta_cmd_id,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
	cmd->cmd.cmd_invoke_cmd.session_id = session_id;
	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
}

/* Invoke a TA command on a loaded TA; the TA response status is preserved. */
int psp_ta_invoke(struct psp_context *psp,
		  uint32_t ta_cmd_id,
		  struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	release_psp_cmd_buf(psp);

	return ret;
}

/* Copy the TA image into the PSP private buffer and issue the load command. */
int psp_ta_load(struct psp_context *psp, struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = acquire_psp_cmd_buf(psp);

	psp_copy_fw(psp, context->bin_desc.start_addr,
		    context->bin_desc.size_bytes);

	/* with XGMI migration enabled the shared buffer must be addressed
	 * through the FB aperture rather than its original MC address
	 */
	if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) &&
	    context->mem_context.shared_bo)
		context->mem_context.shared_mc_addr =
			amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo);

	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	/* the new TA session id is only valid on success */
	if (!ret)
		context->session_id = cmd->resp.session_id;

	release_psp_cmd_buf(psp);

	return ret;
}

/* Invoke a command on the XGMI TA. */
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
}

/* Unload the XGMI TA where supported; always clears the initialized flag. */
int psp_xgmi_terminate(struct psp_context *psp)
{
	int ret;
	struct amdgpu_device *adev = psp->adev;

	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
	     adev->gmc.xgmi.connected_to_cpu))
		return 0;

	if (!psp->xgmi_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->xgmi_context.context);

	psp->xgmi_context.context.initialized = false;

	return ret;
}

/*
 * Load (when load_ta is set) and initialize an XGMI TA session.
 * Returns -ENOENT when the TA firmware is not available.
 */
int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	if (!psp->ta_fw ||
	    !psp->xgmi_context.context.bin_desc.size_bytes ||
	    !psp->xgmi_context.context.bin_desc.start_addr)
		return -ENOENT;

	if (!load_ta)
		goto invoke;

	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	if (!psp->xgmi_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
		if (ret)
			return ret;
	}

	/* Load XGMI TA */
	ret = psp_ta_load(psp, &psp->xgmi_context.context);
	if (!ret)
		psp->xgmi_context.context.initialized = true;
	else
		return ret;

invoke:
	/* Initialize XGMI session */
	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
	xgmi_cmd->flag_extend_link_record = set_extended_data;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;

	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	/* note down the capbility flag for XGMI TA */
	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;

	return ret;
}

/* Query the hive id from the XGMI TA. */
int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;

	/* Invoke xgmi ta to get hive id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	if (ret)
		return ret;

	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;

	return 0;
}

/* Query this device's XGMI node id from the XGMI TA. */
int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;

	/* Invoke xgmi ta to get the node id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	if (ret)
		return ret;

	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;

	return 0;
}

/* True when the TA can report peer link info: 13.0.2 with TA >= 0x2000000b, or MP0 >= 13.0.6. */
static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
{
	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
		IP_VERSION(13, 0, 2) &&
		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
	       amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
		IP_VERSION(13, 0, 6);
}

/*
 * Chips that support extended topology information require the driver to
 *
 * reflect topology information in the opposite direction. This is
 * because the TA has already exceeded its link record limit and if the
 * TA holds bi-directional information, the driver would have to do
 * multiple fetches instead of just two.
 */
static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
					   struct psp_xgmi_node_info node_info)
{
	struct amdgpu_device *mirror_adev;
	struct amdgpu_hive_info *hive;
	uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
	uint64_t dst_node_id = node_info.node_id;
	uint8_t dst_num_hops = node_info.num_hops;
	uint8_t dst_is_sharing_enabled = node_info.is_sharing_enabled;
	uint8_t dst_num_links = node_info.num_links;

	hive = amdgpu_get_xgmi_hive(psp->adev);
	if (WARN_ON(!hive))
		return;

	/* find the peer device in the hive and mirror this node's view into it */
	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
		struct psp_xgmi_topology_info *mirror_top_info;
		int j;

		if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
			continue;

		mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
		for (j = 0; j < mirror_top_info->num_nodes; j++) {
			if (mirror_top_info->nodes[j].node_id != src_node_id)
				continue;

			mirror_top_info->nodes[j].num_hops = dst_num_hops;
			mirror_top_info->nodes[j].is_sharing_enabled = dst_is_sharing_enabled;
			/* prevent 0 num_links value re-reflection since reflection
			 * criteria is based on num_hops (direct or indirect).
			 */
			if (dst_num_links) {
				mirror_top_info->nodes[j].num_links = dst_num_links;
				/* swap src and dst due to frame of reference */
				for (int k = 0; k < dst_num_links; k++) {
					mirror_top_info->nodes[j].port_num[k].src_xgmi_port_num =
						node_info.port_num[k].dst_xgmi_port_num;
					mirror_top_info->nodes[j].port_num[k].dst_xgmi_port_num =
						node_info.port_num[k].src_xgmi_port_num;
				}
			}

			break;
		}

		break;
	}

	amdgpu_put_xgmi_hive(hive);
}

/*
 * Fetch XGMI topology (and, when supported, peer link info) from the TA.
 * topology is both input (the node list to query) and output.
 */
int psp_xgmi_get_topology_info(struct psp_context *psp,
			       int number_devices,
			       struct psp_xgmi_topology_info *topology,
			       bool get_extended_data)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
	int i;
	int ret;

	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
	xgmi_cmd->flag_extend_link_record = get_extended_data;

	/* Fill in the shared memory with topology information as input */
	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
	topology_info_input->num_nodes = number_devices;

	for (i = 0; i < topology_info_input->num_nodes; i++) {
		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
	}

	/* Invoke xgmi ta to get the topology information */
	ret = psp_xgmi_invoke(psp,
			      TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
	if (ret)
		return ret;

	/* Read the output topology information from the shared memory */
	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
	for (i = 0; i < topology->num_nodes; i++) {
		/* extended data will either be 0 or equal to non-extended data */
		if (topology_info_output->nodes[i].num_hops)
			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;

		/* non-extended data gets everything here so no need to update */
		if (!get_extended_data) {
			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
			topology->nodes[i].is_sharing_enabled =
				topology_info_output->nodes[i].is_sharing_enabled;
			topology->nodes[i].sdma_engine =
				topology_info_output->nodes[i].sdma_engine;
		}

	}

	/* Invoke xgmi ta again to get the link information */
	if (psp_xgmi_peer_link_info_supported(psp)) {
		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
		bool requires_reflection =
			(psp->xgmi_context.supports_extended_data &&
			 get_extended_data) ||
			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
			IP_VERSION(13, 0, 6) ||
			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
			IP_VERSION(13, 0, 14) ||
			amdgpu_sriov_vf(psp->adev);
		bool ta_port_num_support = psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG ||
					   amdgpu_sriov_xgmi_ta_ext_peer_link_en(psp->adev);

		/* popluate the shared output buffer rather than the cmd input buffer
		 * with node_ids as the input for GET_PEER_LINKS command execution.
		 * This is required for GET_PEER_LINKS per xgmi ta implementation.
		 * The same requirement for GET_EXTEND_PEER_LINKS command.
		 */
		if (ta_port_num_support) {
			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;

			for (i = 0; i < topology->num_nodes; i++)
				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;

			link_extend_info_output->num_nodes = topology->num_nodes;
			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
		} else {
			link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;

			for (i = 0; i < topology->num_nodes; i++)
				link_info_output->nodes[i].node_id = topology->nodes[i].node_id;

			link_info_output->num_nodes = topology->num_nodes;
			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
		}

		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
		if (ret)
			return ret;

		for (i = 0; i < topology->num_nodes; i++) {
			uint8_t node_num_links = ta_port_num_support ?
				link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
			/* accumulate num_links on extended data */
			if (get_extended_data) {
				topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
			} else {
				topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
						topology->nodes[i].num_links : node_num_links;
			}
			/* popluate the connected port num info if supported and available */
			if (ta_port_num_support && topology->nodes[i].num_links) {
				memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
				       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
			}

			/* reflect the topology information for bi-directionality */
			if (requires_reflection && topology->nodes[i].num_hops)
				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
		}
	}

	return 0;
}

/* Push the driver's view of the XGMI topology down to the TA. */
int psp_xgmi_set_topology_info(struct psp_context *psp,
			       int number_devices,
			       struct psp_xgmi_topology_info *topology)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	int i;

	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
	topology_info_input->num_nodes = number_devices;

	for (i = 0; i < topology_info_input->num_nodes; i++) {
		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
		topology_info_input->nodes[i].is_sharing_enabled = 1;
		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
	}

	/* Invoke xgmi ta to set topology information */
	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
}

// ras begin
/* Translate well-known RAS TA status codes into kernel log warnings. */
static void psp_ras_ta_check_status(struct psp_context *psp)
{
	struct ta_ras_shared_memory *ras_cmd =
		(struct ta_ras_shared_memory
		 *)psp->ras_context.context.mem_context.shared_buf;

	switch (ras_cmd->ras_status) {
	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: cmd failed due to unsupported ip\n");
		break;
	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: cmd failed due to unsupported error injection\n");
		break;
	case TA_RAS_STATUS__SUCCESS:
		break;
	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
			dev_warn(psp->adev->dev,
				 "RAS WARNING: Inject error to critical region is not allowed\n");
		break;
	default:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
		break;
	}
}

/*
 * Serialized entry point for RAS TA commands: copies the caller's input
 * into the TA shared buffer, invokes the TA, and copies the result back,
 * all under ras_context.mutex.
 */
static int psp_ras_send_cmd(struct psp_context *psp,
			    enum ras_command cmd_id, void *in, void *out)
{
	struct ta_ras_shared_memory *ras_cmd;
	uint32_t cmd = cmd_id;
	int ret = 0;

	if (!in)
		return -EINVAL;

	mutex_lock(&psp->ras_context.mutex);
	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	/* each command copies only the relevant part of the input union */
	switch (cmd) {
	case TA_RAS_COMMAND__ENABLE_FEATURES:
	case TA_RAS_COMMAND__DISABLE_FEATURES:
		memcpy(&ras_cmd->ras_in_message,
		       in, sizeof(ras_cmd->ras_in_message));
		break;
	case TA_RAS_COMMAND__TRIGGER_ERROR:
		memcpy(&ras_cmd->ras_in_message.trigger_error,
		       in, sizeof(ras_cmd->ras_in_message.trigger_error));
		break;
	case TA_RAS_COMMAND__QUERY_ADDRESS:
		memcpy(&ras_cmd->ras_in_message.address,
		       in, sizeof(ras_cmd->ras_in_message.address));
		break;
	default:
		dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
		ret = -EINVAL;
		goto err_out;
	}

	ras_cmd->cmd_id = cmd;
	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);

	switch
	       (cmd) {
	case TA_RAS_COMMAND__TRIGGER_ERROR:
		if (!ret && out)
			memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
		break;
	case TA_RAS_COMMAND__QUERY_ADDRESS:
		if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
			ret = -EINVAL;
		else if (out)
			memcpy(out,
			       &ras_cmd->ras_out_message.address,
			       sizeof(ras_cmd->ras_out_message.address));
		break;
	default:
		break;
	}

err_out:
	mutex_unlock(&psp->ras_context.mutex);

	return ret;
}

/* Invoke the RAS TA, then sanity-check the interface version and output flags. */
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);

	/* with a RAS fatal interrupt pending, the shared buffer is not reliable */
	if (amdgpu_ras_intr_triggered())
		return ret;

	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
		dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
		return -EINVAL;
	}

	if (!ret) {
		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
			dev_warn(psp->adev->dev, "ECC switch disabled\n");

			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
			dev_warn(psp->adev->dev,
				 "RAS internal register access blocked\n");

		psp_ras_ta_check_status(psp);
	}

	return ret;
}

/* Enable or disable RAS features through the RAS TA. */
int psp_ras_enable_features(struct psp_context *psp,
			    union ta_ras_cmd_input *info, bool enable)
{
	enum ras_command cmd_id;
	int ret;

	if (!psp->ras_context.context.initialized || !info)
		return -EINVAL;

	cmd_id = enable ?
		 TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES;
	ret = psp_ras_send_cmd(psp, cmd_id, info, NULL);
	if (ret)
		return -EINVAL;

	return 0;
}

/* Unload the RAS TA and tear down its mutex. */
int psp_ras_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->ras_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->ras_context.context);

	psp->ras_context.context.initialized = false;

	mutex_destroy(&psp->ras_context.mutex);

	return ret;
}

/*
 * Load and initialize the RAS TA: reconcile the GECC boot configuration
 * with module parameters first, then pass the platform init flags to the
 * TA via the shared buffer and load it.
 */
int psp_ras_initialize(struct psp_context *psp)
{
	int ret;
	uint32_t boot_cfg = 0xFF;
	struct amdgpu_device *adev = psp->adev;
	struct ta_ras_shared_memory *ras_cmd;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
	    !adev->psp.ras_context.context.bin_desc.start_addr) {
		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
		return 0;
	}

	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
		/* query GECC enablement status from boot config
		 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
		 */
		ret = psp_boot_config_get(adev, &boot_cfg);
		if (ret)
			dev_warn(adev->dev, "PSP get boot config failed\n");

		if (boot_cfg == 1 && !adev->ras_default_ecc_enabled &&
		    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
			dev_warn(adev->dev, "GECC is currently enabled, which may affect performance\n");
			dev_warn(adev->dev,
				 "To disable GECC, please reboot the system and load the amdgpu driver with the parameter amdgpu_ras_enable=0\n");
		} else {
			if ((adev->ras_default_ecc_enabled || amdgpu_ras_enable == 1) &&
			    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
				if (boot_cfg == 1) {
					dev_info(adev->dev, "GECC is enabled\n");
				} else {
					/* enable GECC in next boot cycle if it is disabled
					 * in boot config, or force enable GECC if failed to
					 * get boot configuration
					 */
					ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
					if (ret)
						dev_warn(adev->dev, "PSP set boot config failed\n");
					else
						dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
				}
			} else {
				if (!boot_cfg) {
					if (!adev->ras_default_ecc_enabled &&
					    amdgpu_ras_enable != 1 &&
					    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
						dev_warn(adev->dev, "GECC is disabled, set amdgpu_ras_enable=1 to enable GECC in next boot cycle if needed\n");
					else
						dev_info(adev->dev, "GECC is disabled\n");
				} else {
					/* disable GECC in next boot cycle if ras is
					 * disabled by module parameter amdgpu_ras_enable
					 * and/or amdgpu_ras_mask, or boot_config_get call
					 * is failed
					 */
					ret = psp_boot_config_set(adev, 0);
					if (ret)
						dev_warn(adev->dev, "PSP set boot config failed\n");
					else
						dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
				}
			}
		}
	}

	psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
	psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	if (!psp->ras_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
		if (ret)
			return ret;
	}

	/* platform init flags consumed by the TA on load */
	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	if (amdgpu_ras_is_poison_mode_supported(adev))
		ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
		ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
	ras_cmd->ras_in_message.init_flags.xcc_mask =
		adev->gfx.xcc_mask;
	ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		ras_cmd->ras_in_message.init_flags.nps_mode =
			adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	ras_cmd->ras_in_message.init_flags.active_umc_mask = adev->umc.active_mask;
	ras_cmd->ras_in_message.init_flags.vram_type = (uint8_t)adev->gmc.vram_type;

	ret = psp_ta_load(psp, &psp->ras_context.context);

	if (!ret && !ras_cmd->ras_status) {
		psp->ras_context.context.initialized = true;
		mutex_init(&psp->ras_context.mutex);
	} else {
		if (ras_cmd->ras_status)
			dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);

		/* fail to load RAS TA */
		psp->ras_context.context.initialized = false;
	}

	return ret;
}

/* Inject a RAS error via the TA; instance_mask selects the target instances. */
int psp_ras_trigger_error(struct psp_context *psp,
			  struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
{
	struct amdgpu_device *adev = psp->adev;
	int ret;
	uint32_t dev_mask;
	uint32_t ras_status = 0;

	if (!psp->ras_context.context.initialized || !info)
		return -EINVAL;

	/* map the logical instance mask onto the block's physical instances */
	switch (info->block_id) {
	case TA_RAS_BLOCK__GFX:
		dev_mask = GET_MASK(GC, instance_mask);
		break;
	case TA_RAS_BLOCK__SDMA:
		dev_mask = GET_MASK(SDMA0, instance_mask);
		break;
	case TA_RAS_BLOCK__VCN:
	case TA_RAS_BLOCK__JPEG:
		dev_mask = GET_MASK(VCN, instance_mask);
		break;
	default:
		dev_mask = instance_mask;
		break;
	}

	/* reuse sub_block_index for backward compatibility */
	dev_mask <<= AMDGPU_RAS_INST_SHIFT;
	dev_mask &= AMDGPU_RAS_INST_MASK;
	info->sub_block_index |= dev_mask;

	ret = psp_ras_send_cmd(psp,
			       TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
	if (ret)
		return -EINVAL;

	/* If err_event_athub occurs error inject was successful, however
	 * return status from TA is no long reliable
	 */
	if (amdgpu_ras_intr_triggered())
		return 0;

	if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
		return -EACCES;
	else if (ras_status)
		return -EINVAL;

	return 0;
}

/* Translate a RAS address query through the TA. */
int psp_ras_query_address(struct psp_context *psp,
			  struct ta_ras_query_address_input *addr_in,
			  struct ta_ras_query_address_output *addr_out)
{
	int ret;

	if (!psp->ras_context.context.initialized ||
	    !addr_in || !addr_out)
		return -EINVAL;

	ret = psp_ras_send_cmd(psp,
			       TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out);

	return ret;
}
// ras end

// HDCP start
/* Load the optional HDCP TA; skipped under SR-IOV and when display is harvested. */
static int psp_hdcp_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	/* bypass hdcp initialization if dmu is harvested */
	if (!amdgpu_device_has_display_hardware(psp->adev))
		return 0;

	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
	    !psp->hdcp_context.context.bin_desc.start_addr) {
		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
		return 0;
	}

	psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
	psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	if (!psp->hdcp_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
		if (ret)
			return ret;
	}

	ret = psp_ta_load(psp, &psp->hdcp_context.context);
	if (!ret) {
		psp->hdcp_context.context.initialized = true;
		mutex_init(&psp->hdcp_context.mutex);
	}

	return ret;
}

/* Invoke an HDCP TA command; a no-op until the TA has been initialized. */
int psp_hdcp_invoke(struct psp_context *psp,
		    uint32_t ta_cmd_id)
{
	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->hdcp_context.context.initialized)
		return 0;

	return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
}

/* Unload the HDCP TA and clear the initialized flag. */
static int psp_hdcp_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->hdcp_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->hdcp_context.context);

	/* Cleared even if unload failed, so we never invoke a dead TA. */
	psp->hdcp_context.context.initialized = false;

	return ret;
}
// HDCP end

// DTM start
/*
 * Load the optional DTM (display topology manager) TA.
 * Same optional-TA pattern as HDCP: silently skipped for SRIOV, harvested
 * display hardware, or a missing ucode binary.
 */
static int psp_dtm_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	/* bypass dtm initialization if dmu is harvested */
	if (!amdgpu_device_has_display_hardware(psp->adev))
		return 0;

	if (!psp->dtm_context.context.bin_desc.size_bytes ||
	    !psp->dtm_context.context.bin_desc.start_addr) {
		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
		return 0;
	}

	psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
	psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	/* Shared buffer is allocated once and reused across reloads. */
	if (!psp->dtm_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
		if (ret)
			return ret;
	}

	ret = psp_ta_load(psp, &psp->dtm_context.context);
	if (!ret) {
		psp->dtm_context.context.initialized = true;
		mutex_init(&psp->dtm_context.mutex);
	}

	return ret;
}

/* Invoke a DTM TA command; no-op (returns 0) under SRIOV or before init. */
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->dtm_context.context.initialized)
		return 0;

	return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
}

/* Unload the DTM TA and clear the initialized flag. */
static int psp_dtm_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->dtm_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->dtm_context.context);

	psp->dtm_context.context.initialized = false;

	return ret;
}
// DTM end

// RAP start
/*
 * Load the optional RAP (register access policy) TA and immediately run its
 * INITIALIZE command; on any failure the TA is terminated and its shared
 * buffer freed so no half-initialized state remains.
 */
static int psp_rap_initialize(struct psp_context *psp)
{
	int ret;
	enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->rap_context.context.bin_desc.size_bytes ||
	    !psp->rap_context.context.bin_desc.start_addr) {
		dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
		return 0;
	}

	psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
	psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	if (!psp->rap_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
		if (ret)
			return ret;
	}

	ret = psp_ta_load(psp, &psp->rap_context.context);
	if (!ret) {
		psp->rap_context.context.initialized = true;
		mutex_init(&psp->rap_context.mutex);
	} else
		return ret;

	ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
	if (ret || status != TA_RAP_STATUS__SUCCESS) {
		psp_rap_terminate(psp);
		/* free rap shared memory */
		psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);

		dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
			 ret, status);

		/*
		 * NOTE(review): if ret == 0 but status != SUCCESS this returns
		 * 0 even though the TA was just torn down; caller sees success.
		 * Looks intentional (RAP is optional) — confirm.
		 */
		return ret;
	}

	return 0;
}

/* Unload the RAP TA and clear the initialized flag. */
static int psp_rap_terminate(struct psp_context *psp)
{
	int ret;

	if (!psp->rap_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->rap_context.context);

	psp->rap_context.context.initialized = false;

	return ret;
}

/*
 * Send a RAP TA command through the shared buffer.
 * Only INITIALIZE and VALIDATE_L0 are accepted; @status (optional) receives
 * the TA-side status on success. Shared-buffer access is serialized by
 * rap_context.mutex.
 */
int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
{
	struct ta_rap_shared_memory *rap_cmd;
	int ret = 0;

	if (!psp->rap_context.context.initialized)
		return 0;

	if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
	    ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
		return -EINVAL;

	mutex_lock(&psp->rap_context.mutex);

	rap_cmd = (struct ta_rap_shared_memory *)
		  psp->rap_context.context.mem_context.shared_buf;
	memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));

	rap_cmd->cmd_id = ta_cmd_id;
	rap_cmd->validation_method_id = METHOD_A;

	ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
	if (ret)
		goto out_unlock;

	if (status)
		*status = rap_cmd->rap_status;

out_unlock:
	mutex_unlock(&psp->rap_context.mutex);

	return ret;
}
// RAP end

/* securedisplay start */
/*
 * Load the optional secure-display TA and sanity-check it with QUERY_TA.
 * On load/query failure the bin_desc size is zeroed so later resume paths
 * will not retry a known-bad TA.
 */
static int psp_securedisplay_initialize(struct psp_context *psp)
{
	int ret;
	struct ta_securedisplay_cmd *securedisplay_cmd;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	/* bypass securedisplay initialization if dmu is harvested */
	if (!amdgpu_device_has_display_hardware(psp->adev))
		return 0;

	if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
	    !psp->securedisplay_context.context.bin_desc.start_addr) {
		dev_info(psp->adev->dev,
			 "SECUREDISPLAY: optional securedisplay ta ucode is not available\n");
		return 0;
	}

	psp->securedisplay_context.context.mem_context.shared_mem_size =
		PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
	psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	/*
	 * NOTE(review): this guards the shared-buffer allocation on
	 * .initialized, while HDCP/DTM/RAP guard on .shared_buf — confirm the
	 * asymmetry is intended (e.g. buffer freed on init failure above).
	 */
	if (!psp->securedisplay_context.context.initialized) {
		ret = psp_ta_init_shared_buf(psp,
					     &psp->securedisplay_context.context.mem_context);
		if (ret)
			return ret;
	}

	ret = psp_ta_load(psp, &psp->securedisplay_context.context);
	if (!ret && !psp->securedisplay_context.context.resp_status) {
		psp->securedisplay_context.context.initialized = true;
		mutex_init(&psp->securedisplay_context.mutex);
	} else {
		/* don't try again */
		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
		return ret;
	}

	mutex_lock(&psp->securedisplay_context.mutex);

	psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
				       TA_SECUREDISPLAY_COMMAND__QUERY_TA);

	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);

	mutex_unlock(&psp->securedisplay_context.mutex);

	if (ret) {
		psp_securedisplay_terminate(psp);
		/* free securedisplay shared memory */
		psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
		dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
		return -EINVAL;
	}

	if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
		psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
		dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
			securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
		/* don't try again */
		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
	}

	return 0;
}

/* Unload the secure-display TA and clear the initialized flag. */
static int psp_securedisplay_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO:bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->securedisplay_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->securedisplay_context.context);

	psp->securedisplay_context.context.initialized = false;

	return ret;
}

/*
 * Invoke a secure-display TA command. Unlike the HDCP/DTM invokers this
 * returns -EINVAL (not 0) when uninitialized, and whitelists the command id.
 */
int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	int ret;

	if (!psp->securedisplay_context.context.initialized)
		return -EINVAL;

	if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC &&
	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2)
		return -EINVAL;

	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);

	return ret;
}
/* SECUREDISPLAY end */

/* Wait for the PSP bootloader, if the backend provides the hook (bare metal only). */
int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
{
	struct psp_context *psp = &adev->psp;
	int ret = 0;

	if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
		ret = psp->funcs->wait_for_bootloader(psp);

	return ret;
}

/* Query the backend for RAS capability; false when the hook is absent. */
bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
{
	if (psp->funcs &&
	    psp->funcs->get_ras_capability) {
		return psp->funcs->get_ras_capability(psp);
	} else {
		return false;
	}
}

/*
 * Ask the backend whether the trusted OS must be reloaded.
 * Always false for SRIOV and APUs, or when the hook is absent.
 */
bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
{
	struct psp_context *psp = &adev->psp;

	if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
		return false;

	if (psp->funcs
	    && psp->funcs->is_reload_needed)
		return psp->funcs->is_reload_needed(psp);

	return false;
}

/*
 * Refresh PSP GPU addresses to frame-buffer aperture addresses.
 * Used on the XGMI-migration path where previously cached MC addresses may
 * no longer be valid.
 */
static void psp_update_gpu_addresses(struct amdgpu_device *adev)
{
	struct psp_context *psp = &adev->psp;

	if (psp->cmd_buf_bo && psp->cmd_buf_mem) {
		psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo);
		psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo);
		psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo);
	}
	if (adev->firmware.rbuf && psp->km_ring.ring_mem)
		psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf);
}

/*
 * Bring the PSP up: load the bootloader-stage binaries in their required
 * order (kdb, spl, sysdrv, soc/intf/dbg/ras/ipkeymgr/spdm drv, sos), create
 * the KM ring, then set up TMR (and PMFW first when Cstate management is
 * centralized in PMFW). The load order here is a hardware contract — do not
 * reorder. Each stage is loaded only if its fw image is valid AND the
 * backend provides the hook.
 */
static int psp_hw_start(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;
	int ret;

	if (amdgpu_virt_xgmi_migrate_enabled(adev))
		psp_update_gpu_addresses(adev);

	/* The bootloader stages are only loaded on bare metal; the host loads
	 * them for SRIOV VFs.
	 */
	if (!amdgpu_sriov_vf(adev)) {
		if ((is_psp_fw_valid(psp->kdb)) &&
		    (psp->funcs->bootloader_load_kdb != NULL)) {
			ret = psp_bootloader_load_kdb(psp);
			if (ret) {
				dev_err(adev->dev, "PSP load kdb failed!\n");
				return ret;
			}
		}

		if ((is_psp_fw_valid(psp->spl)) &&
		    (psp->funcs->bootloader_load_spl != NULL)) {
			ret = psp_bootloader_load_spl(psp);
			if (ret) {
				dev_err(adev->dev, "PSP load spl failed!\n");
				return ret;
			}
		}

		if ((is_psp_fw_valid(psp->sys)) &&
		    (psp->funcs->bootloader_load_sysdrv != NULL)) {
			ret = psp_bootloader_load_sysdrv(psp);
			if (ret) {
				dev_err(adev->dev, "PSP load sys drv failed!\n");
				return ret;
			}
		}

		if ((is_psp_fw_valid(psp->soc_drv)) &&
		    (psp->funcs->bootloader_load_soc_drv != NULL)) {
			ret = psp_bootloader_load_soc_drv(psp);
			if (ret) {
				dev_err(adev->dev, "PSP load soc drv failed!\n");
				return ret;
			}
		}

		if ((is_psp_fw_valid(psp->intf_drv)) &&
		    (psp->funcs->bootloader_load_intf_drv != NULL)) {
			ret = psp_bootloader_load_intf_drv(psp);
			if (ret) {
				dev_err(adev->dev, "PSP load intf drv failed!\n");
				return ret;
			}
		}

		if ((is_psp_fw_valid(psp->dbg_drv)) &&
		    (psp->funcs->bootloader_load_dbg_drv != NULL)) {
			ret = psp_bootloader_load_dbg_drv(psp);
			if (ret) {
				dev_err(adev->dev, "PSP load dbg drv failed!\n");
				return ret;
			}
		}

		if ((is_psp_fw_valid(psp->ras_drv)) &&
		    (psp->funcs->bootloader_load_ras_drv != NULL)) {
			ret = psp_bootloader_load_ras_drv(psp);
			if (ret) {
				dev_err(adev->dev, "PSP load ras_drv failed!\n");
				return ret;
			}
		}

		if ((is_psp_fw_valid(psp->ipkeymgr_drv)) &&
		    (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) {
			ret = psp_bootloader_load_ipkeymgr_drv(psp);
			if (ret) {
				dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n");
				return ret;
			}
		}

		if ((is_psp_fw_valid(psp->spdm_drv)) &&
		    (psp->funcs->bootloader_load_spdm_drv != NULL)) {
			ret = psp_bootloader_load_spdm_drv(psp);
			if (ret) {
				dev_err(adev->dev, "PSP load spdm_drv failed!\n");
				return ret;
			}
		}

		if ((is_psp_fw_valid(psp->sos)) &&
		    (psp->funcs->bootloader_load_sos != NULL)) {
			ret = psp_bootloader_load_sos(psp);
			if (ret) {
				dev_err(adev->dev, "PSP load sos failed!\n");
				return ret;
			}
		}
	}

	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
	if (ret) {
		dev_err(adev->dev, "PSP create ring failed!\n");
		return ret;
	}

	/* Firmware reservation is only (re)established on a cold init, not on
	 * reset or resume.
	 */
	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
		ret = psp_update_fw_reservation(psp);
		if (ret) {
			dev_err(adev->dev, "update fw reservation failed!\n");
			return ret;
		}
	}

	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
		goto skip_pin_bo;

	if (!psp->boot_time_tmr || psp->autoload_supported) {
		ret = psp_tmr_init(psp);
		if (ret) {
			dev_err(adev->dev, "PSP tmr init failed!\n");
			return ret;
		}
	}

skip_pin_bo:
	/*
	 * For ASICs with DF Cstate management centralized
	 * to PMFW, TMR setup should be performed after PMFW
	 * loaded and before other non-psp firmware loaded.
	 */
	if (psp->pmfw_centralized_cstate_management) {
		ret = psp_load_smu_fw(psp);
		if (ret)
			return ret;
	}

	if (!psp->boot_time_tmr || !psp->autoload_supported) {
		ret = psp_tmr_load(psp);
		if (ret) {
			dev_err(adev->dev, "PSP load tmr failed!\n");
			return ret;
		}
	}

	return 0;
}

/*
 * Map an amdgpu ucode id to the PSP GFX firmware type used in LOAD_IP_FW.
 * Returns -EINVAL for ids with no PSP mapping.
 */
int amdgpu_psp_get_fw_type(struct amdgpu_firmware_info *ucode,
			   enum psp_gfx_fw_type *type)
{
	switch (ucode->ucode_id) {
	case AMDGPU_UCODE_ID_CAP:
		*type = GFX_FW_TYPE_CAP;
		break;
	case AMDGPU_UCODE_ID_SDMA0:
		*type = GFX_FW_TYPE_SDMA0;
		break;
	case AMDGPU_UCODE_ID_SDMA1:
		*type = GFX_FW_TYPE_SDMA1;
		break;
	case AMDGPU_UCODE_ID_SDMA2:
		*type = GFX_FW_TYPE_SDMA2;
		break;
	case AMDGPU_UCODE_ID_SDMA3:
		*type = GFX_FW_TYPE_SDMA3;
		break;
	case AMDGPU_UCODE_ID_SDMA4:
		*type = GFX_FW_TYPE_SDMA4;
		break;
	case AMDGPU_UCODE_ID_SDMA5:
		*type = GFX_FW_TYPE_SDMA5;
		break;
	case AMDGPU_UCODE_ID_SDMA6:
		*type = GFX_FW_TYPE_SDMA6;
		break;
	case AMDGPU_UCODE_ID_SDMA7:
		*type = GFX_FW_TYPE_SDMA7;
		break;
	case AMDGPU_UCODE_ID_CP_MES:
		*type = GFX_FW_TYPE_CP_MES;
		break;
	case AMDGPU_UCODE_ID_CP_MES_DATA:
		*type = GFX_FW_TYPE_MES_STACK;
		break;
	case AMDGPU_UCODE_ID_CP_MES1:
		*type = GFX_FW_TYPE_CP_MES_KIQ;
		break;
	case AMDGPU_UCODE_ID_CP_MES1_DATA:
		*type = GFX_FW_TYPE_MES_KIQ_STACK;
		break;
	case AMDGPU_UCODE_ID_CP_CE:
		*type = GFX_FW_TYPE_CP_CE;
		break;
	case AMDGPU_UCODE_ID_CP_PFP:
		*type = GFX_FW_TYPE_CP_PFP;
		break;
	case AMDGPU_UCODE_ID_CP_ME:
		*type = GFX_FW_TYPE_CP_ME;
		break;
	case AMDGPU_UCODE_ID_CP_MEC1:
		*type = GFX_FW_TYPE_CP_MEC;
		break;
	case AMDGPU_UCODE_ID_CP_MEC1_JT:
		*type = GFX_FW_TYPE_CP_MEC_ME1;
		break;
	/* MEC2 deliberately maps to the same fw type as MEC1. */
	case AMDGPU_UCODE_ID_CP_MEC2:
		*type = GFX_FW_TYPE_CP_MEC;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2_JT:
		*type = GFX_FW_TYPE_CP_MEC_ME2;
		break;
	case AMDGPU_UCODE_ID_RLC_P:
		*type = GFX_FW_TYPE_RLC_P;
		break;
	case AMDGPU_UCODE_ID_RLC_V:
		*type = GFX_FW_TYPE_RLC_V;
		break;
	case AMDGPU_UCODE_ID_RLC_G:
		*type = GFX_FW_TYPE_RLC_G;
		break;
	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
		break;
	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
		break;
	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
		break;
	case AMDGPU_UCODE_ID_RLC_IRAM:
		*type = GFX_FW_TYPE_RLC_IRAM;
		break;
	case AMDGPU_UCODE_ID_RLC_DRAM:
		*type = GFX_FW_TYPE_RLC_DRAM_BOOT;
		break;
	case AMDGPU_UCODE_ID_RLC_IRAM_1:
		*type = GFX_FW_TYPE_RLX6_UCODE_CORE1;
		break;
	case AMDGPU_UCODE_ID_RLC_DRAM_1:
		*type = GFX_FW_TYPE_RLX6_DRAM_BOOT_CORE1;
		break;
	case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
		*type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
		break;
	case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
		*type = GFX_FW_TYPE_SE0_TAP_DELAYS;
		break;
	case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
		*type = GFX_FW_TYPE_SE1_TAP_DELAYS;
		break;
	case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
		*type = GFX_FW_TYPE_SE2_TAP_DELAYS;
		break;
	case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
		*type = GFX_FW_TYPE_SE3_TAP_DELAYS;
		break;
	case AMDGPU_UCODE_ID_SMC:
		*type = GFX_FW_TYPE_SMU;
		break;
	case AMDGPU_UCODE_ID_PPTABLE:
		*type = GFX_FW_TYPE_PPTABLE;
		break;
	case AMDGPU_UCODE_ID_UVD:
		*type = GFX_FW_TYPE_UVD;
		break;
	case AMDGPU_UCODE_ID_UVD1:
		*type = GFX_FW_TYPE_UVD1;
		break;
	case AMDGPU_UCODE_ID_VCE:
		*type = GFX_FW_TYPE_VCE;
		break;
	case AMDGPU_UCODE_ID_VCN:
		*type = GFX_FW_TYPE_VCN;
		break;
	case AMDGPU_UCODE_ID_VCN1:
		*type = GFX_FW_TYPE_VCN1;
		break;
	case AMDGPU_UCODE_ID_DMCU_ERAM:
		*type = GFX_FW_TYPE_DMCU_ERAM;
		break;
	case AMDGPU_UCODE_ID_DMCU_INTV:
		*type = GFX_FW_TYPE_DMCU_ISR;
		break;
	case AMDGPU_UCODE_ID_VCN0_RAM:
		*type = GFX_FW_TYPE_VCN0_RAM;
		break;
	case AMDGPU_UCODE_ID_VCN1_RAM:
		*type = GFX_FW_TYPE_VCN1_RAM;
		break;
	case AMDGPU_UCODE_ID_DMCUB:
		*type = GFX_FW_TYPE_DMUB;
		break;
	case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
	case AMDGPU_UCODE_ID_SDMA_RS64:
		*type = GFX_FW_TYPE_SDMA_UCODE_TH0;
		break;
	case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
		*type = GFX_FW_TYPE_SDMA_UCODE_TH1;
		break;
	case AMDGPU_UCODE_ID_IMU_I:
		*type = GFX_FW_TYPE_IMU_I;
		break;
	case AMDGPU_UCODE_ID_IMU_D:
		*type = GFX_FW_TYPE_IMU_D;
		break;
	case AMDGPU_UCODE_ID_CP_RS64_PFP:
		*type = GFX_FW_TYPE_RS64_PFP;
		break;
	case AMDGPU_UCODE_ID_CP_RS64_ME:
		*type = GFX_FW_TYPE_RS64_ME;
		break;
	case AMDGPU_UCODE_ID_CP_RS64_MEC:
		*type = GFX_FW_TYPE_RS64_MEC;
		break;
	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
		*type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
		break;
	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
		*type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
		break;
	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
		*type = GFX_FW_TYPE_RS64_ME_P0_STACK;
		break;
	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
		*type = GFX_FW_TYPE_RS64_ME_P1_STACK;
		break;
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
		*type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
		break;
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
		*type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
		break;
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
		*type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
		break;
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
		*type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
		break;
	case AMDGPU_UCODE_ID_VPE_CTX:
		*type = GFX_FW_TYPE_VPEC_FW1;
		break;
	case AMDGPU_UCODE_ID_VPE_CTL:
		*type = GFX_FW_TYPE_VPEC_FW2;
		break;
	case AMDGPU_UCODE_ID_VPE:
		*type = GFX_FW_TYPE_VPE;
		break;
	case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
		*type = GFX_FW_TYPE_UMSCH_UCODE;
		break;
	case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
		*type = GFX_FW_TYPE_UMSCH_DATA;
		break;
	case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
		*type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
		break;
	case AMDGPU_UCODE_ID_P2S_TABLE:
		*type = GFX_FW_TYPE_P2S_TABLE;
		break;
	case AMDGPU_UCODE_ID_JPEG_RAM:
		*type = GFX_FW_TYPE_JPEG_RAM;
		break;
	case AMDGPU_UCODE_ID_ISP:
		*type = GFX_FW_TYPE_ISP;
		break;
	case AMDGPU_UCODE_ID_MAXIMUM:
	default:
		return -EINVAL;
	}

	return 0;
}

/* Dump the firmware header of the given ucode for debugging, where known. */
static void psp_print_fw_hdr(struct psp_context *psp,
			     struct amdgpu_firmware_info *ucode)
{
	struct amdgpu_device *adev = psp->adev;
	struct common_firmware_header *hdr;

	switch (ucode->ucode_id) {
	case AMDGPU_UCODE_ID_SDMA0:
	case AMDGPU_UCODE_ID_SDMA1:
	case AMDGPU_UCODE_ID_SDMA2:
	case AMDGPU_UCODE_ID_SDMA3:
	case AMDGPU_UCODE_ID_SDMA4:
	case AMDGPU_UCODE_ID_SDMA5:
	case AMDGPU_UCODE_ID_SDMA6:
	case AMDGPU_UCODE_ID_SDMA7:
		hdr = (struct common_firmware_header *)
			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
		amdgpu_ucode_print_sdma_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_CP_CE:
		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
		amdgpu_ucode_print_gfx_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_CP_PFP:
		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
		amdgpu_ucode_print_gfx_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_CP_ME:
		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
		amdgpu_ucode_print_gfx_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_CP_MEC1:
		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
		amdgpu_ucode_print_gfx_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_RLC_G:
	case AMDGPU_UCODE_ID_RLC_DRAM_1:
	case AMDGPU_UCODE_ID_RLC_IRAM_1:
		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
		amdgpu_ucode_print_rlc_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_SMC:
		hdr = (struct common_firmware_header *)adev->pm.fw->data;
		amdgpu_ucode_print_smc_hdr(hdr);
		break;
	default:
		/* Other ucode ids have no header printer; silently skip. */
		break;
	}
}

/*
 * Fill a GFX_CMD_ID_LOAD_IP_FW command buffer for @ucode.
 * Returns -EINVAL (after logging) if the ucode id has no PSP fw-type mapping.
 */
static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
				       struct amdgpu_firmware_info *ucode,
				       struct psp_gfx_cmd_resp *cmd)
{
	int ret;
	uint64_t fw_mem_mc_addr = ucode->mc_addr;

	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;

	ret = psp_get_fw_type(psp, ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
	if (ret)
		dev_err(psp->adev->dev, "Unknown firmware type %d\n", ucode->ucode_id);
	return ret;
}

/* Submit a LOAD_IP_FW command for @ucode via the shared PSP command buffer. */
int psp_execute_ip_fw_load(struct psp_context *psp,
			   struct amdgpu_firmware_info *ucode)
{
	int ret = 0;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
	if (!ret) {
		ret = psp_cmd_submit_buf(psp, ucode, cmd,
					 psp->fence_buf_mc_addr);
	}

	release_psp_cmd_buf(psp);

	return ret;
}

/*
 * Load the P2S table firmware if present and applicable. Skipped during
 * BACO/BAMACO runtime PM, under SRIOV, and on MP0 13.0.6/13.0.14 with a SOS
 * older than the minimum supporting version.
 */
static int psp_load_p2s_table(struct psp_context *psp)
{
	int ret;
	struct amdgpu_device *adev = psp->adev;
	struct amdgpu_firmware_info *ucode =
		&adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];

	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
			       (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
		return 0;

	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
		/* Minimum SOS fw version that supports P2S differs APU vs dGPU. */
		uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
								0x0036003C;
		if (psp->sos.fw_version < supp_vers)
			return 0;
	}

	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
		return 0;

	ret = psp_execute_ip_fw_load(psp, ucode);

	return ret;
}

/*
 * Load SMU (PMFW) firmware through PSP. On RAS-enabled reset for MP0
 * 11.0.2/11.0.4 the MP1 state is set to UNLOAD first to prepare for reload.
 */
static int psp_load_smu_fw(struct psp_context *psp)
{
	int ret;
	struct amdgpu_device *adev = psp->adev;
	struct amdgpu_firmware_info *ucode =
		&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
	struct amdgpu_ras *ras = psp->ras_context.ras;

	/*
	 * Skip SMU FW reloading in case of using BACO for runpm only,
	 * as SMU is always alive.
	 */
	if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
			       (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
		return 0;

	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
		return 0;

	if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
	     (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
	      amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
		ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
		if (ret)
			/* Non-fatal: continue with the load attempt anyway. */
			dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
	}

	ret = psp_execute_ip_fw_load(psp, ucode);

	if (ret)
		dev_err(adev->dev, "PSP load smu failed!\n");

	return ret;
}

/*
 * Decide whether a given ucode should NOT be loaded through PSP:
 * empty images, the P2S table (loaded separately), SMC when autoload or
 * PMFW-centralized management handles it, SRIOV-skipped images, and MEC JT
 * images under autoload.
 */
static bool fw_load_skip_check(struct psp_context *psp,
			       struct amdgpu_firmware_info *ucode)
{
	if (!ucode->fw || !ucode->ucode_size)
		return true;

	if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
		return true;

	if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
	    (psp_smu_reload_quirk(psp) ||
	     psp->autoload_supported ||
	     psp->pmfw_centralized_cstate_management))
		return true;

	if (amdgpu_sriov_vf(psp->adev) &&
	    amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
		return true;

	if (psp->autoload_supported &&
	    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
	     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
		/* skip mec JT when autoload is enabled */
		return true;

	return false;
}

/*
 * Load an explicit list of ucode images in order, aborting on the first
 * failure.
 */
int psp_load_fw_list(struct psp_context *psp,
		     struct amdgpu_firmware_info **ucode_list, int ucode_count)
{
	int ret = 0, i;
	struct amdgpu_firmware_info *ucode;

	for (i = 0; i < ucode_count; ++i) {
		ucode = ucode_list[i];
		psp_print_fw_hdr(psp, ucode);
		ret = psp_execute_ip_fw_load(psp, ucode);
		if (ret)
			return ret;
	}
	return ret;
}

static int
psp_load_non_psp_fw(struct psp_context *psp)
{
	int i, ret;
	struct amdgpu_firmware_info *ucode;
	struct amdgpu_device *adev = psp->adev;

	/* With autoload but without PMFW-centralized Cstate management, SMU
	 * firmware must go first (otherwise psp_hw_start already loaded it).
	 */
	if (psp->autoload_supported &&
	    !psp->pmfw_centralized_cstate_management) {
		ret = psp_load_smu_fw(psp);
		if (ret)
			return ret;
	}

	/* Load P2S table first if it's available */
	psp_load_p2s_table(psp);

	for (i = 0; i < adev->firmware.max_ucodes; i++) {
		ucode = &adev->firmware.ucode[i];

		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
		    !fw_load_skip_check(psp, ucode)) {
			ret = psp_load_smu_fw(psp);
			if (ret)
				return ret;
			continue;
		}

		if (fw_load_skip_check(psp, ucode))
			continue;

		if (psp->autoload_supported &&
		    (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
			     IP_VERSION(11, 0, 7) ||
		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
			     IP_VERSION(11, 0, 11) ||
		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
			     IP_VERSION(11, 0, 12) ||
		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
			     IP_VERSION(15, 0, 0) ||
		     amdgpu_ip_version(adev, MP0_HWIP, 0) ==
			     IP_VERSION(15, 0, 8)) &&
		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
			/* PSP only receive one SDMA fw for sienna_cichlid,
			 * as all four sdma fw are same
			 */
			continue;

		psp_print_fw_hdr(psp, ucode);

		ret = psp_execute_ip_fw_load(psp, ucode);
		if (ret)
			return ret;

		/* Start rlc autoload after psp received all the gfx firmware */
		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
		    adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
			ret = psp_rlc_autoload_start(psp);
			if (ret) {
				dev_err(adev->dev, "Failed to start rlc autoload\n");
				return ret;
			}
		}
	}

	return 0;
}

/*
 * Full PSP firmware bring-up: ring init/start, non-PSP firmware, ASD, RL,
 * then the optional TAs (XGMI under SRIOV reset, RAS, HDCP, DTM, RAP,
 * secure display). TA failures are logged but do not fail the load.
 */
static int psp_load_fw(struct amdgpu_device *adev)
{
	int ret;
	struct psp_context *psp = &adev->psp;

	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
		/* should not destroy ring, only stop */
		psp_ring_stop(psp, PSP_RING_TYPE__KM);
	} else {
		memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);

		ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
		if (ret) {
			dev_err(adev->dev, "PSP ring init failed!\n");
			goto failed;
		}
	}

	ret = psp_hw_start(psp);
	if (ret)
		goto failed;

	ret = psp_load_non_psp_fw(psp);
	if (ret)
		goto failed1;

	ret = psp_asd_initialize(psp);
	if (ret) {
		dev_err(adev->dev, "PSP load asd failed!\n");
		goto failed1;
	}

	ret = psp_rl_load(adev);
	if (ret) {
		dev_err(adev->dev, "PSP load RL failed!\n");
		goto failed1;
	}

	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
		if (adev->gmc.xgmi.num_physical_nodes > 1) {
			ret = psp_xgmi_initialize(psp, false, true);
			/* Warning the XGMI seesion initialize failure
			 * Instead of stop driver initialization
			 */
			if (ret)
				dev_err(psp->adev->dev,
					"XGMI: Failed to initialize XGMI session\n");
		}
	}

	if (psp->ta_fw) {
		ret = psp_ras_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAS: Failed to initialize RAS\n");

		ret = psp_hdcp_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"HDCP: Failed to initialize HDCP\n");

		ret = psp_dtm_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"DTM: Failed to initialize DTM\n");

		ret = psp_rap_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAP: Failed to initialize RAP\n");

		ret = psp_securedisplay_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
	}

	return 0;

failed1:
	psp_free_shared_bufs(psp);
failed:
	/*
	 * all cleanup jobs (xgmi terminate, ras terminate,
	 * ring destroy, cmd/fence/fw buffers destroy,
	 * psp->cmd destroy) are delayed to psp_hw_fini
	 */
	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
	return ret;
}

/*
 * IP-block hw_init: set up ucode BOs and run the full PSP load. On failure
 * the driver falls back to direct (non-PSP) firmware loading and returns
 * -EINVAL.
 */
static int psp_hw_init(struct amdgpu_ip_block *ip_block)
{
	int ret;
	struct amdgpu_device *adev = ip_block->adev;

	mutex_lock(&adev->firmware.mutex);

	ret = amdgpu_ucode_init_bo(adev);
	if (ret)
		goto failed;

	ret = psp_load_fw(adev);
	if (ret) {
		dev_err(adev->dev, "PSP firmware loading failed\n");
		goto failed;
	}

	mutex_unlock(&adev->firmware.mutex);
	return 0;

failed:
	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
	mutex_unlock(&adev->firmware.mutex);
	return -EINVAL;
}

/*
 * IP-block hw_fini: terminate TAs (reverse of init order), then ASD, TMR,
 * and destroy the KM ring. Always returns 0.
 */
static int psp_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct psp_context *psp = &adev->psp;

	if (psp->ta_fw) {
		psp_ras_terminate(psp);
		psp_securedisplay_terminate(psp);
		psp_rap_terminate(psp);
		psp_dtm_terminate(psp);
		psp_hdcp_terminate(psp);

		if (adev->gmc.xgmi.num_physical_nodes > 1)
			psp_xgmi_terminate(psp);
	}

	psp_asd_terminate(psp);
	psp_tmr_terminate(psp);

	psp_ring_destroy(psp, PSP_RING_TYPE__KM);

	return 0;
}

/*
 * IP-block suspend: tear down XGMI/TAs/ASD/TMR and stop (not destroy) the
 * KM ring. Unlike hw_fini, every termination failure here aborts suspend.
 */
static int psp_suspend(struct amdgpu_ip_block *ip_block)
{
	int ret = 0;
	struct amdgpu_device *adev = ip_block->adev;
	struct psp_context *psp = &adev->psp;

	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
	    psp->xgmi_context.context.initialized) {
		ret = psp_xgmi_terminate(psp);
		if (ret) {
			dev_err(adev->dev, "Failed to terminate xgmi ta\n");
			goto out;
		}
	}

	if (psp->ta_fw) {
		ret = psp_ras_terminate(psp);
		if (ret) {
			dev_err(adev->dev, "Failed to terminate ras ta\n");
			goto out;
		}
		ret = psp_hdcp_terminate(psp);
		if (ret) {
			dev_err(adev->dev, "Failed to terminate hdcp ta\n");
			goto out;
		}
		ret = psp_dtm_terminate(psp);
		if (ret) {
			dev_err(adev->dev, "Failed to terminate dtm ta\n");
			goto out;
		}
		ret = psp_rap_terminate(psp);
		if (ret) {
			dev_err(adev->dev, "Failed to terminate rap ta\n");
			goto out;
		}
		ret = psp_securedisplay_terminate(psp);
		if (ret) {
			dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
			goto out;
		}
	}

	ret = psp_asd_terminate(psp);
	if (ret) {
		dev_err(adev->dev, "Failed to terminate asd\n");
		goto out;
	}

	ret = psp_tmr_terminate(psp);
	if (ret) {
		dev_err(adev->dev, "Failed to terminate tmr\n");
		goto out;
	}

	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
	if (ret)
		dev_err(adev->dev, "PSP ring stop failed\n");

out:
	return ret;
}

/* IP-block resume: rerun memory training if enabled, then restart PSP. */
static int psp_resume(struct amdgpu_ip_block *ip_block)
{
	int ret;
	struct amdgpu_device *adev = ip_block->adev;
	struct psp_context *psp = &adev->psp;

	dev_info(adev->dev, "PSP is resuming...\n");

	if (psp->mem_train_ctx.enable_mem_training) {
		ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
		if (ret) {
			dev_err(adev->dev, "Failed to process memory training!\n");
			return ret;
		}
	}

	mutex_lock(&adev->firmware.mutex);

	ret = amdgpu_ucode_init_bo(adev);
	if (ret)
		goto failed;

	ret = psp_hw_start(psp);
	if (ret)
		goto failed;

	ret = psp_load_non_psp_fw(psp);
3363 if (ret) 3364 goto failed; 3365 3366 ret = psp_asd_initialize(psp); 3367 if (ret) { 3368 dev_err(adev->dev, "PSP load asd failed!\n"); 3369 goto failed; 3370 } 3371 3372 ret = psp_rl_load(adev); 3373 if (ret) { 3374 dev_err(adev->dev, "PSP load RL failed!\n"); 3375 goto failed; 3376 } 3377 3378 if (adev->gmc.xgmi.num_physical_nodes > 1) { 3379 ret = psp_xgmi_initialize(psp, false, true); 3380 /* Warning the XGMI seesion initialize failure 3381 * Instead of stop driver initialization 3382 */ 3383 if (ret) 3384 dev_err(psp->adev->dev, 3385 "XGMI: Failed to initialize XGMI session\n"); 3386 } 3387 3388 if (psp->ta_fw) { 3389 ret = psp_ras_initialize(psp); 3390 if (ret) 3391 dev_err(psp->adev->dev, 3392 "RAS: Failed to initialize RAS\n"); 3393 3394 ret = psp_hdcp_initialize(psp); 3395 if (ret) 3396 dev_err(psp->adev->dev, 3397 "HDCP: Failed to initialize HDCP\n"); 3398 3399 ret = psp_dtm_initialize(psp); 3400 if (ret) 3401 dev_err(psp->adev->dev, 3402 "DTM: Failed to initialize DTM\n"); 3403 3404 ret = psp_rap_initialize(psp); 3405 if (ret) 3406 dev_err(psp->adev->dev, 3407 "RAP: Failed to initialize RAP\n"); 3408 3409 ret = psp_securedisplay_initialize(psp); 3410 if (ret) 3411 dev_err(psp->adev->dev, 3412 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 3413 } 3414 3415 mutex_unlock(&adev->firmware.mutex); 3416 3417 return 0; 3418 3419 failed: 3420 dev_err(adev->dev, "PSP resume failed\n"); 3421 mutex_unlock(&adev->firmware.mutex); 3422 return ret; 3423 } 3424 3425 int psp_gpu_reset(struct amdgpu_device *adev) 3426 { 3427 int ret; 3428 3429 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 3430 return 0; 3431 3432 mutex_lock(&adev->psp.mutex); 3433 ret = psp_mode1_reset(&adev->psp); 3434 mutex_unlock(&adev->psp.mutex); 3435 3436 return ret; 3437 } 3438 3439 int psp_rlc_autoload_start(struct psp_context *psp) 3440 { 3441 int ret; 3442 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 3443 3444 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC; 3445 3446 ret = 
psp_cmd_submit_buf(psp, NULL, cmd, 3447 psp->fence_buf_mc_addr); 3448 3449 release_psp_cmd_buf(psp); 3450 3451 return ret; 3452 } 3453 3454 int psp_ring_cmd_submit(struct psp_context *psp, 3455 uint64_t cmd_buf_mc_addr, 3456 uint64_t fence_mc_addr, 3457 int index) 3458 { 3459 unsigned int psp_write_ptr_reg = 0; 3460 struct psp_gfx_rb_frame *write_frame; 3461 struct psp_ring *ring = &psp->km_ring; 3462 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; 3463 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + 3464 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; 3465 struct amdgpu_device *adev = psp->adev; 3466 uint32_t ring_size_dw = ring->ring_size / 4; 3467 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; 3468 3469 /* KM (GPCOM) prepare write pointer */ 3470 psp_write_ptr_reg = psp_ring_get_wptr(psp); 3471 3472 /* Update KM RB frame pointer to new frame */ 3473 /* write_frame ptr increments by size of rb_frame in bytes */ 3474 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ 3475 if ((psp_write_ptr_reg % ring_size_dw) == 0) 3476 write_frame = ring_buffer_start; 3477 else 3478 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); 3479 /* Check invalid write_frame ptr address */ 3480 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { 3481 dev_err(adev->dev, 3482 "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", 3483 ring_buffer_start, ring_buffer_end, write_frame); 3484 dev_err(adev->dev, 3485 "write_frame is pointing to address out of bounds\n"); 3486 return -EINVAL; 3487 } 3488 3489 /* Initialize KM RB frame */ 3490 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); 3491 3492 /* Update KM RB frame */ 3493 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); 3494 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); 3495 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); 3496 write_frame->fence_addr_lo = 
lower_32_bits(fence_mc_addr); 3497 write_frame->fence_value = index; 3498 amdgpu_device_flush_hdp(adev, NULL); 3499 3500 /* Update the write Pointer in DWORDs */ 3501 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; 3502 psp_ring_set_wptr(psp, psp_write_ptr_reg); 3503 return 0; 3504 } 3505 3506 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name) 3507 { 3508 struct amdgpu_device *adev = psp->adev; 3509 const struct psp_firmware_header_v1_0 *asd_hdr; 3510 int err = 0; 3511 3512 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED, 3513 "amdgpu/%s_asd.bin", chip_name); 3514 if (err) 3515 goto out; 3516 3517 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; 3518 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version); 3519 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version); 3520 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes); 3521 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr + 3522 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); 3523 return 0; 3524 out: 3525 amdgpu_ucode_release(&adev->psp.asd_fw); 3526 return err; 3527 } 3528 3529 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name) 3530 { 3531 struct amdgpu_device *adev = psp->adev; 3532 const struct psp_firmware_header_v1_0 *toc_hdr; 3533 int err = 0; 3534 3535 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED, 3536 "amdgpu/%s_toc.bin", chip_name); 3537 if (err) 3538 goto out; 3539 3540 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; 3541 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); 3542 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); 3543 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); 3544 adev->psp.toc.start_addr = (uint8_t *)toc_hdr + 3545 
le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); 3546 return 0; 3547 out: 3548 amdgpu_ucode_release(&adev->psp.toc_fw); 3549 return err; 3550 } 3551 3552 static int parse_sos_bin_descriptor(struct psp_context *psp, 3553 const struct psp_fw_bin_desc *desc, 3554 const struct psp_firmware_header_v2_0 *sos_hdr) 3555 { 3556 uint8_t *ucode_start_addr = NULL; 3557 3558 if (!psp || !desc || !sos_hdr) 3559 return -EINVAL; 3560 3561 ucode_start_addr = (uint8_t *)sos_hdr + 3562 le32_to_cpu(desc->offset_bytes) + 3563 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3564 3565 switch (desc->fw_type) { 3566 case PSP_FW_TYPE_PSP_SOS: 3567 psp->sos.fw_version = le32_to_cpu(desc->fw_version); 3568 psp->sos.feature_version = le32_to_cpu(desc->fw_version); 3569 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes); 3570 psp->sos.start_addr = ucode_start_addr; 3571 break; 3572 case PSP_FW_TYPE_PSP_SYS_DRV: 3573 psp->sys.fw_version = le32_to_cpu(desc->fw_version); 3574 psp->sys.feature_version = le32_to_cpu(desc->fw_version); 3575 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes); 3576 psp->sys.start_addr = ucode_start_addr; 3577 break; 3578 case PSP_FW_TYPE_PSP_KDB: 3579 psp->kdb.fw_version = le32_to_cpu(desc->fw_version); 3580 psp->kdb.feature_version = le32_to_cpu(desc->fw_version); 3581 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes); 3582 psp->kdb.start_addr = ucode_start_addr; 3583 break; 3584 case PSP_FW_TYPE_PSP_TOC: 3585 psp->toc.fw_version = le32_to_cpu(desc->fw_version); 3586 psp->toc.feature_version = le32_to_cpu(desc->fw_version); 3587 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes); 3588 psp->toc.start_addr = ucode_start_addr; 3589 break; 3590 case PSP_FW_TYPE_PSP_SPL: 3591 psp->spl.fw_version = le32_to_cpu(desc->fw_version); 3592 psp->spl.feature_version = le32_to_cpu(desc->fw_version); 3593 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes); 3594 psp->spl.start_addr = ucode_start_addr; 3595 break; 3596 case PSP_FW_TYPE_PSP_RL: 3597 psp->rl.fw_version 
= le32_to_cpu(desc->fw_version); 3598 psp->rl.feature_version = le32_to_cpu(desc->fw_version); 3599 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes); 3600 psp->rl.start_addr = ucode_start_addr; 3601 break; 3602 case PSP_FW_TYPE_PSP_SOC_DRV: 3603 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version); 3604 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version); 3605 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3606 psp->soc_drv.start_addr = ucode_start_addr; 3607 break; 3608 case PSP_FW_TYPE_PSP_INTF_DRV: 3609 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version); 3610 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version); 3611 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3612 psp->intf_drv.start_addr = ucode_start_addr; 3613 break; 3614 case PSP_FW_TYPE_PSP_DBG_DRV: 3615 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version); 3616 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version); 3617 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3618 psp->dbg_drv.start_addr = ucode_start_addr; 3619 break; 3620 case PSP_FW_TYPE_PSP_RAS_DRV: 3621 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version); 3622 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version); 3623 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3624 psp->ras_drv.start_addr = ucode_start_addr; 3625 break; 3626 case PSP_FW_TYPE_PSP_IPKEYMGR_DRV: 3627 psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version); 3628 psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version); 3629 psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3630 psp->ipkeymgr_drv.start_addr = ucode_start_addr; 3631 break; 3632 case PSP_FW_TYPE_PSP_SPDM_DRV: 3633 psp->spdm_drv.fw_version = le32_to_cpu(desc->fw_version); 3634 psp->spdm_drv.feature_version = le32_to_cpu(desc->fw_version); 3635 psp->spdm_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3636 psp->spdm_drv.start_addr = ucode_start_addr; 3637 break; 3638 default: 3639 
dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type); 3640 break; 3641 } 3642 3643 return 0; 3644 } 3645 3646 static int psp_init_sos_base_fw(struct amdgpu_device *adev) 3647 { 3648 const struct psp_firmware_header_v1_0 *sos_hdr; 3649 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3650 uint8_t *ucode_array_start_addr; 3651 3652 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3653 ucode_array_start_addr = (uint8_t *)sos_hdr + 3654 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3655 3656 if (adev->gmc.xgmi.connected_to_cpu || 3657 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) { 3658 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version); 3659 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version); 3660 3661 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes); 3662 adev->psp.sys.start_addr = ucode_array_start_addr; 3663 3664 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes); 3665 adev->psp.sos.start_addr = ucode_array_start_addr + 3666 le32_to_cpu(sos_hdr->sos.offset_bytes); 3667 } else { 3668 /* Load alternate PSP SOS FW */ 3669 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3670 3671 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3672 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3673 3674 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes); 3675 adev->psp.sys.start_addr = ucode_array_start_addr + 3676 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes); 3677 3678 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes); 3679 adev->psp.sos.start_addr = ucode_array_start_addr + 3680 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes); 3681 } 3682 3683 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) { 3684 dev_warn(adev->dev, "PSP SOS FW not available"); 3685 return -EINVAL; 3686 } 
3687 3688 return 0; 3689 } 3690 3691 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) 3692 { 3693 struct amdgpu_device *adev = psp->adev; 3694 const struct psp_firmware_header_v1_0 *sos_hdr; 3695 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1; 3696 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; 3697 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3698 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0; 3699 const struct psp_firmware_header_v2_1 *sos_hdr_v2_1; 3700 int fw_index, fw_bin_count, start_index = 0; 3701 const struct psp_fw_bin_desc *fw_bin; 3702 uint8_t *ucode_array_start_addr; 3703 int err = 0; 3704 3705 if (amdgpu_is_kicker_fw(adev)) 3706 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED, 3707 "amdgpu/%s_sos_kicker.bin", chip_name); 3708 else 3709 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED, 3710 "amdgpu/%s_sos.bin", chip_name); 3711 if (err) 3712 goto out; 3713 3714 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3715 ucode_array_start_addr = (uint8_t *)sos_hdr + 3716 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3717 amdgpu_ucode_print_psp_hdr(&sos_hdr->header); 3718 3719 switch (sos_hdr->header.header_version_major) { 3720 case 1: 3721 err = psp_init_sos_base_fw(adev); 3722 if (err) 3723 goto out; 3724 3725 if (sos_hdr->header.header_version_minor == 1) { 3726 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; 3727 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes); 3728 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3729 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes); 3730 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes); 3731 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3732 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes); 3733 } 3734 if (sos_hdr->header.header_version_minor == 2) { 3735 sos_hdr_v1_2 = (const struct 
psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; 3736 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes); 3737 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3738 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes); 3739 } 3740 if (sos_hdr->header.header_version_minor == 3) { 3741 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3742 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes); 3743 adev->psp.toc.start_addr = ucode_array_start_addr + 3744 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes); 3745 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes); 3746 adev->psp.kdb.start_addr = ucode_array_start_addr + 3747 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes); 3748 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes); 3749 adev->psp.spl.start_addr = ucode_array_start_addr + 3750 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes); 3751 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes); 3752 adev->psp.rl.start_addr = ucode_array_start_addr + 3753 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes); 3754 } 3755 break; 3756 case 2: 3757 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data; 3758 3759 fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); 3760 3761 if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) { 3762 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n"); 3763 err = -EINVAL; 3764 goto out; 3765 } 3766 3767 if (sos_hdr_v2_0->header.header_version_minor == 1) { 3768 sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data; 3769 3770 fw_bin = sos_hdr_v2_1->psp_fw_bin; 3771 3772 if (psp_is_aux_sos_load_required(psp)) 3773 start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index); 3774 else 3775 fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index); 3776 3777 } else { 3778 fw_bin = sos_hdr_v2_0->psp_fw_bin; 3779 } 3780 3781 for (fw_index = start_index; fw_index < 
fw_bin_count; fw_index++) { 3782 err = parse_sos_bin_descriptor(psp, fw_bin + fw_index, 3783 sos_hdr_v2_0); 3784 if (err) 3785 goto out; 3786 } 3787 break; 3788 default: 3789 dev_err(adev->dev, 3790 "unsupported psp sos firmware\n"); 3791 err = -EINVAL; 3792 goto out; 3793 } 3794 3795 return 0; 3796 out: 3797 amdgpu_ucode_release(&adev->psp.sos_fw); 3798 3799 return err; 3800 } 3801 3802 static bool is_ta_fw_applicable(struct psp_context *psp, 3803 const struct psp_fw_bin_desc *desc) 3804 { 3805 struct amdgpu_device *adev = psp->adev; 3806 uint32_t fw_version; 3807 3808 switch (desc->fw_type) { 3809 case TA_FW_TYPE_PSP_XGMI: 3810 case TA_FW_TYPE_PSP_XGMI_AUX: 3811 /* for now, AUX TA only exists on 13.0.6 ta bin, 3812 * from v20.00.0x.14 3813 */ 3814 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == 3815 IP_VERSION(13, 0, 6)) { 3816 fw_version = le32_to_cpu(desc->fw_version); 3817 3818 if (adev->flags & AMD_IS_APU && 3819 (fw_version & 0xff) >= 0x14) 3820 return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX; 3821 else 3822 return desc->fw_type == TA_FW_TYPE_PSP_XGMI; 3823 } 3824 break; 3825 default: 3826 break; 3827 } 3828 3829 return true; 3830 } 3831 3832 static int parse_ta_bin_descriptor(struct psp_context *psp, 3833 const struct psp_fw_bin_desc *desc, 3834 const struct ta_firmware_header_v2_0 *ta_hdr) 3835 { 3836 uint8_t *ucode_start_addr = NULL; 3837 3838 if (!psp || !desc || !ta_hdr) 3839 return -EINVAL; 3840 3841 if (!is_ta_fw_applicable(psp, desc)) 3842 return 0; 3843 3844 ucode_start_addr = (uint8_t *)ta_hdr + 3845 le32_to_cpu(desc->offset_bytes) + 3846 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3847 3848 switch (desc->fw_type) { 3849 case TA_FW_TYPE_PSP_ASD: 3850 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3851 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version); 3852 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3853 psp->asd_context.bin_desc.start_addr = ucode_start_addr; 3854 
break; 3855 case TA_FW_TYPE_PSP_XGMI: 3856 case TA_FW_TYPE_PSP_XGMI_AUX: 3857 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3858 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3859 psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr; 3860 break; 3861 case TA_FW_TYPE_PSP_RAS: 3862 psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3863 psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3864 psp->ras_context.context.bin_desc.start_addr = ucode_start_addr; 3865 break; 3866 case TA_FW_TYPE_PSP_HDCP: 3867 psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3868 psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3869 psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr; 3870 break; 3871 case TA_FW_TYPE_PSP_DTM: 3872 psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3873 psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3874 psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr; 3875 break; 3876 case TA_FW_TYPE_PSP_RAP: 3877 psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3878 psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3879 psp->rap_context.context.bin_desc.start_addr = ucode_start_addr; 3880 break; 3881 case TA_FW_TYPE_PSP_SECUREDISPLAY: 3882 psp->securedisplay_context.context.bin_desc.fw_version = 3883 le32_to_cpu(desc->fw_version); 3884 psp->securedisplay_context.context.bin_desc.size_bytes = 3885 le32_to_cpu(desc->size_bytes); 3886 psp->securedisplay_context.context.bin_desc.start_addr = 3887 ucode_start_addr; 3888 break; 3889 default: 3890 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type); 3891 break; 3892 } 3893 3894 return 0; 3895 } 3896 3897 static int parse_ta_v1_microcode(struct psp_context *psp) 3898 { 3899 const 
struct ta_firmware_header_v1_0 *ta_hdr; 3900 struct amdgpu_device *adev = psp->adev; 3901 3902 ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data; 3903 3904 if (le16_to_cpu(ta_hdr->header.header_version_major) != 1) 3905 return -EINVAL; 3906 3907 adev->psp.xgmi_context.context.bin_desc.fw_version = 3908 le32_to_cpu(ta_hdr->xgmi.fw_version); 3909 adev->psp.xgmi_context.context.bin_desc.size_bytes = 3910 le32_to_cpu(ta_hdr->xgmi.size_bytes); 3911 adev->psp.xgmi_context.context.bin_desc.start_addr = 3912 (uint8_t *)ta_hdr + 3913 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3914 3915 adev->psp.ras_context.context.bin_desc.fw_version = 3916 le32_to_cpu(ta_hdr->ras.fw_version); 3917 adev->psp.ras_context.context.bin_desc.size_bytes = 3918 le32_to_cpu(ta_hdr->ras.size_bytes); 3919 adev->psp.ras_context.context.bin_desc.start_addr = 3920 (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr + 3921 le32_to_cpu(ta_hdr->ras.offset_bytes); 3922 3923 adev->psp.hdcp_context.context.bin_desc.fw_version = 3924 le32_to_cpu(ta_hdr->hdcp.fw_version); 3925 adev->psp.hdcp_context.context.bin_desc.size_bytes = 3926 le32_to_cpu(ta_hdr->hdcp.size_bytes); 3927 adev->psp.hdcp_context.context.bin_desc.start_addr = 3928 (uint8_t *)ta_hdr + 3929 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3930 3931 adev->psp.dtm_context.context.bin_desc.fw_version = 3932 le32_to_cpu(ta_hdr->dtm.fw_version); 3933 adev->psp.dtm_context.context.bin_desc.size_bytes = 3934 le32_to_cpu(ta_hdr->dtm.size_bytes); 3935 adev->psp.dtm_context.context.bin_desc.start_addr = 3936 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3937 le32_to_cpu(ta_hdr->dtm.offset_bytes); 3938 3939 adev->psp.securedisplay_context.context.bin_desc.fw_version = 3940 le32_to_cpu(ta_hdr->securedisplay.fw_version); 3941 adev->psp.securedisplay_context.context.bin_desc.size_bytes = 3942 le32_to_cpu(ta_hdr->securedisplay.size_bytes); 3943 
adev->psp.securedisplay_context.context.bin_desc.start_addr = 3944 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3945 le32_to_cpu(ta_hdr->securedisplay.offset_bytes); 3946 3947 adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); 3948 3949 return 0; 3950 } 3951 3952 static int parse_ta_v2_microcode(struct psp_context *psp) 3953 { 3954 const struct ta_firmware_header_v2_0 *ta_hdr; 3955 struct amdgpu_device *adev = psp->adev; 3956 int err = 0; 3957 int ta_index = 0; 3958 3959 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data; 3960 3961 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) 3962 return -EINVAL; 3963 3964 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) { 3965 dev_err(adev->dev, "packed TA count exceeds maximum limit\n"); 3966 return -EINVAL; 3967 } 3968 3969 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) { 3970 err = parse_ta_bin_descriptor(psp, 3971 &ta_hdr->ta_fw_bin[ta_index], 3972 ta_hdr); 3973 if (err) 3974 return err; 3975 } 3976 3977 return 0; 3978 } 3979 3980 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name) 3981 { 3982 const struct common_firmware_header *hdr; 3983 struct amdgpu_device *adev = psp->adev; 3984 int err; 3985 3986 if (amdgpu_is_kicker_fw(adev)) 3987 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED, 3988 "amdgpu/%s_ta_kicker.bin", chip_name); 3989 else 3990 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED, 3991 "amdgpu/%s_ta.bin", chip_name); 3992 if (err) 3993 return err; 3994 3995 hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data; 3996 switch (le16_to_cpu(hdr->header_version_major)) { 3997 case 1: 3998 err = parse_ta_v1_microcode(psp); 3999 break; 4000 case 2: 4001 err = parse_ta_v2_microcode(psp); 4002 break; 4003 default: 4004 dev_err(adev->dev, "unsupported TA header version\n"); 4005 err = -EINVAL; 4006 } 4007 4008 if (err) 
4009 amdgpu_ucode_release(&adev->psp.ta_fw); 4010 4011 return err; 4012 } 4013 4014 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name) 4015 { 4016 struct amdgpu_device *adev = psp->adev; 4017 const struct psp_firmware_header_v1_0 *cap_hdr_v1_0; 4018 struct amdgpu_firmware_info *info = NULL; 4019 int err = 0; 4020 4021 if (!amdgpu_sriov_vf(adev)) { 4022 dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n"); 4023 return -EINVAL; 4024 } 4025 4026 err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL, 4027 "amdgpu/%s_cap.bin", chip_name); 4028 if (err) { 4029 if (err == -ENODEV) { 4030 dev_warn(adev->dev, "cap microcode does not exist, skip\n"); 4031 err = 0; 4032 } else { 4033 dev_err(adev->dev, "fail to initialize cap microcode\n"); 4034 } 4035 goto out; 4036 } 4037 4038 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP]; 4039 info->ucode_id = AMDGPU_UCODE_ID_CAP; 4040 info->fw = adev->psp.cap_fw; 4041 cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *) 4042 adev->psp.cap_fw->data; 4043 adev->firmware.fw_size += ALIGN( 4044 le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE); 4045 adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version); 4046 adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version); 4047 adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes); 4048 4049 return 0; 4050 4051 out: 4052 amdgpu_ucode_release(&adev->psp.cap_fw); 4053 return err; 4054 } 4055 4056 int psp_config_sq_perfmon(struct psp_context *psp, 4057 uint32_t xcp_id, bool core_override_enable, 4058 bool reg_override_enable, bool perfmon_override_enable) 4059 { 4060 int ret; 4061 4062 if (amdgpu_sriov_vf(psp->adev)) 4063 return 0; 4064 4065 if (xcp_id > MAX_XCP) { 4066 dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id); 4067 return -EINVAL; 4068 } 4069 4070 if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) { 4071 
dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n", 4072 amdgpu_ip_version(psp->adev, MP0_HWIP, 0)); 4073 return -EINVAL; 4074 } 4075 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 4076 4077 cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON; 4078 cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id); 4079 cmd->cmd.config_sq_perfmon.core_override = core_override_enable; 4080 cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable; 4081 cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable; 4082 4083 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 4084 if (ret) 4085 dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n", 4086 xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable); 4087 4088 release_psp_cmd_buf(psp); 4089 return ret; 4090 } 4091 4092 static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block, 4093 enum amd_clockgating_state state) 4094 { 4095 return 0; 4096 } 4097 4098 static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block, 4099 enum amd_powergating_state state) 4100 { 4101 return 0; 4102 } 4103 4104 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, 4105 struct device_attribute *attr, 4106 char *buf) 4107 { 4108 struct drm_device *ddev = dev_get_drvdata(dev); 4109 struct amdgpu_device *adev = drm_to_adev(ddev); 4110 struct amdgpu_ip_block *ip_block; 4111 uint32_t fw_ver; 4112 int ret; 4113 4114 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP); 4115 if (!ip_block || !ip_block->status.late_initialized) { 4116 dev_info(adev->dev, "PSP block is not ready yet\n."); 4117 return -EBUSY; 4118 } 4119 4120 mutex_lock(&adev->psp.mutex); 4121 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver); 4122 mutex_unlock(&adev->psp.mutex); 4123 4124 if (ret) { 4125 dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret); 4126 return ret; 4127 } 4128 4129 return sysfs_emit(buf, 
"%x\n", fw_ver);
}

/*
 * Sysfs store handler for the usbc_pd_fw attribute.
 *
 * The written string is taken as a firmware file name: the handler requests
 * "amdgpu/<buf>" from userspace, stages it in a VRAM/GTT buffer aligned to a
 * 1MB boundary (a PSP requirement) and asks the PSP to load it.
 * Returns @count on success or a negative errno on failure.
 */
static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret, idx;
	const struct firmware *usbc_pd_fw;
	struct amdgpu_bo *fw_buf_bo = NULL;
	uint64_t fw_pri_mc_addr;
	void *fw_pri_cpu_addr;
	struct amdgpu_ip_block *ip_block;

	/* The PSP IP block must have finished late init before we can use it. */
	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
	if (!ip_block || !ip_block->status.late_initialized) {
		dev_err(adev->dev, "PSP block is not ready yet.");
		return -EBUSY;
	}

	/* Protect against the device going away (unplug) while we run. */
	if (!drm_dev_enter(ddev, &idx))
		return -ENODEV;

	ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s", buf);
	if (ret)
		goto fail;

	/* LFB address which is aligned to 1MB boundary per PSP request */
	ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &fw_buf_bo, &fw_pri_mc_addr,
				      &fw_pri_cpu_addr);
	if (ret)
		goto rel_buf;

	memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);

	/* psp.mutex serializes command submission to the PSP. */
	mutex_lock(&adev->psp.mutex);
	ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
	mutex_unlock(&adev->psp.mutex);

	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);

rel_buf:
	amdgpu_ucode_release(&usbc_pd_fw);
fail:
	if (ret) {
		dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret);
		/* Propagate the negative errno through the ssize_t return. */
		count = ret;
	}

	drm_dev_exit(idx);
	return count;
}

/*
 * psp_copy_fw - stage a firmware image in the PSP private buffer.
 * @psp: PSP context owning the fw_pri staging buffer
 * @start_addr: source of the firmware image
 * @bin_size: image size in bytes (caller must ensure it fits in PSP_1_MEG)
 *
 * The whole 1MB staging buffer is zeroed before the copy so no stale data
 * from a previous image remains. Silently does nothing if the DRM device
 * has been unplugged.
 */
void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
{
	int idx;

	if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
		return;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, start_addr, bin_size);

	drm_dev_exit(idx);
}

/**
 * DOC: usbc_pd_fw
 * Reading from this file will retrieve the USB-C PD firmware version. Writing to
 * this file will trigger the update process.
 */
static DEVICE_ATTR(usbc_pd_fw, 0644,
		   psp_usbc_pd_fw_sysfs_read,
		   psp_usbc_pd_fw_sysfs_write);

/* A firmware binary descriptor is valid iff it has a nonzero size. */
int is_psp_fw_valid(struct psp_bin_desc bin)
{
	return bin.size_bytes;
}

/*
 * Binary sysfs write handler for psp_vbflash: accumulates IFWI image chunks
 * written by userspace into a kernel staging buffer. The actual flash is
 * triggered later by reading the same file.
 */
static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
					const struct bin_attribute *bin_attr,
					char *buffer, loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	/* A new image is being staged, so any previous flash result is stale. */
	adev->psp.vbflash_done = false;

	/* Safeguard against memory drain */
	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
		dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
		kvfree(adev->psp.vbflash_tmp_buf);
		adev->psp.vbflash_tmp_buf = NULL;
		adev->psp.vbflash_image_size = 0;
		return -ENOMEM;
	}

	/* TODO Just allocate max for now and optimize to realloc later if needed */
	if (!adev->psp.vbflash_tmp_buf) {
		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
		if (!adev->psp.vbflash_tmp_buf)
			return -ENOMEM;
	}

	/*
	 * NOTE(review): the size check above tests the size accumulated so far,
	 * not pos + count; confirm the sysfs layer bounds pos/count so this
	 * memcpy cannot run past the AMD_VBIOS_FILE_MAX_SIZE_B allocation.
	 */
	mutex_lock(&adev->psp.mutex);
	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
	adev->psp.vbflash_image_size += count;
	mutex_unlock(&adev->psp.mutex);

	dev_dbg(adev->dev, "IFWI staged for update\n");

	return count;
}

/*
 * Binary sysfs read handler for psp_vbflash: reading the file triggers the
 * actual SPIROM update with the image previously staged by write(). The
 * staging buffer is always released, whether the update succeeds or not.
 */
static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
				       const struct bin_attribute *bin_attr, char *buffer,
				       loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_bo *fw_buf_bo = NULL;
	uint64_t fw_pri_mc_addr;
	void *fw_pri_cpu_addr;
	int ret;

	/* Nothing was staged by a prior write, nothing to flash. */
	if (adev->psp.vbflash_image_size == 0)
		return -EINVAL;

	dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");

	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &fw_buf_bo,
				      &fw_pri_mc_addr,
				      &fw_pri_cpu_addr);
	if (ret)
		goto rel_buf;

	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);

	mutex_lock(&adev->psp.mutex);
	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
	mutex_unlock(&adev->psp.mutex);

	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);

rel_buf:
	/* Drop the staging buffer on both success and failure paths. */
	kvfree(adev->psp.vbflash_tmp_buf);
	adev->psp.vbflash_tmp_buf = NULL;
	adev->psp.vbflash_image_size = 0;

	if (ret) {
		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
		return ret;
	}

	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
	return 0;
}

/**
 * DOC: psp_vbflash
 * Writing to this file will stage an IFWI for update. Reading from this file
 * will trigger the update process.
 */
static const struct bin_attribute psp_vbflash_bin_attr = {
	.attr = {.name = "psp_vbflash", .mode = 0660},
	.size = 0,
	.write = amdgpu_psp_vbflash_write,
	.read = amdgpu_psp_vbflash_read,
};

/**
 * DOC: psp_vbflash_status
 * The status of the flash process.
 * 0: IFWI flash not complete.
 * 1: IFWI flash complete.
 */
static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t vbflash_status;

	vbflash_status = psp_vbflash_status(&adev->psp);
	/*
	 * Collapse the raw PSP status into 0 (not complete) / 1 (complete).
	 * NOTE(review): bit 31 of the raw status is treated as "not done";
	 * confirm that interpretation against the PSP interface spec.
	 */
	if (!adev->psp.vbflash_done)
		vbflash_status = 0;
	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
		vbflash_status = 1;

	return sysfs_emit(buf, "0x%x\n", vbflash_status);
}
static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);

/* Binary sysfs attributes exposed for IFWI flashing. */
static const struct bin_attribute *const bin_flash_attrs[] = {
	&psp_vbflash_bin_attr,
	NULL
};

/* Plain sysfs attributes: flash status plus USB-C PD firmware update. */
static struct attribute *flash_attrs[] = {
	&dev_attr_psp_vbflash_status.attr,
	&dev_attr_usbc_pd_fw.attr,
	NULL
};

/*
 * Hide each attribute (mode 0) on ASICs that do not support the
 * corresponding update path; otherwise report its access mode.
 */
static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (attr == &dev_attr_usbc_pd_fw.attr)
		return adev->psp.sup_pd_fw_up ? 0660 : 0;

	return adev->psp.sup_ifwi_up ? 0440 : 0;
}

/* Same visibility rule for the binary psp_vbflash attribute. */
static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
						const struct bin_attribute *attr,
						int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return adev->psp.sup_ifwi_up ? 0660 : 0;
}

/* Attribute group registered on the device for firmware-flashing controls. */
const struct attribute_group amdgpu_flash_attr_group = {
	.attrs = flash_attrs,
	.bin_attrs = bin_flash_attrs,
	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
	.is_visible = amdgpu_flash_attr_is_visible,
};

#if defined(CONFIG_DEBUG_FS)
/*
 * open() for the psp_spirom_dump debugfs file: allocates a GTT buffer and
 * has the PSP dump the SPIROM contents into it. The buffer triplet stays
 * attached to adev->psp until release().
 */
static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct spirom_bo *bo_triplet;
	int ret;

	/* serialize the open() file calling */
	if (!mutex_trylock(&adev->psp.mutex))
		return -EBUSY;

	/*
	 * make sure only one userspace process is alive for dumping so that
	 * only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE * 2 is consumed.
	 * let's say the case where one process try opening the file while
	 * another one has proceeded to read or release. In this way, eliminate
	 * the use of mutex for read() or release() callback as well.
	 */
	if (adev->psp.spirom_dump_trip) {
		mutex_unlock(&adev->psp.mutex);
		return -EBUSY;
	}

	bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
	if (!bo_triplet) {
		mutex_unlock(&adev->psp.mutex);
		return -ENOMEM;
	}

	ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &bo_triplet->bo,
				      &bo_triplet->mc_addr,
				      &bo_triplet->cpu_addr);
	if (ret)
		goto rel_trip;

	/* Ask the PSP to dump the SPIROM contents into the new buffer. */
	ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
	if (ret)
		goto rel_bo;

	adev->psp.spirom_dump_trip = bo_triplet;
	mutex_unlock(&adev->psp.mutex);
	return 0;
rel_bo:
	amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
			      &bo_triplet->cpu_addr);
rel_trip:
	kfree(bo_triplet);
	mutex_unlock(&adev->psp.mutex);
	dev_err(adev->dev, "Trying IFWI dump fails, err = %d\n", ret);
	return ret;
}

/*
 * read() for psp_spirom_dump: serves the dump captured at open() time
 * straight from the CPU mapping of the GTT buffer.
 */
static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
					    loff_t *pos)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;

	if (!bo_triplet)
		return -EINVAL;

	return simple_read_from_buffer(buf,
				       size,
				       pos, bo_triplet->cpu_addr,
				       AMD_VBIOS_FILE_MAX_SIZE_B * 2);
}

/* release() for psp_spirom_dump: frees the dump buffer allocated at open(). */
static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;

	if (bo_triplet) {
		amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
				      &bo_triplet->cpu_addr);
		kfree(bo_triplet);
	}

	adev->psp.spirom_dump_trip = NULL;
	return 0;
}

static const struct file_operations psp_dump_spirom_debugfs_ops = {
	.owner = THIS_MODULE,
	.open = psp_read_spirom_debugfs_open,
	.read = psp_read_spirom_debugfs_read,
	.release = psp_read_spirom_debugfs_release,
	.llseek = default_llseek,
};
#endif

/* Register the PSP debugfs entries (SPIROM dump) under the DRM primary node. */
void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;

	debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
				 adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
#endif
}

/* Common IP-block callbacks shared by every PSP hardware generation below. */
const struct amd_ip_funcs psp_ip_funcs = {
	.name = "psp",
	.early_init = psp_early_init,
	.sw_init = psp_sw_init,
	.sw_fini = psp_sw_fini,
	.hw_init = psp_hw_init,
	.hw_fini = psp_hw_fini,
	.suspend = psp_suspend,
	.resume = psp_resume,
	.set_clockgating_state = psp_set_clockgating_state,
	.set_powergating_state = psp_set_powergating_state,
};

/*
 * Per-generation PSP IP block descriptors: they differ only in version
 * numbers and all share psp_ip_funcs.
 */
const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 11,
	.minor = 0,
	.rev = 8,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 13,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 13,
	.minor = 0,
	.rev = 4,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 14,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v15_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 15,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v15_0_8_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 15,
	.minor = 0,
	.rev = 8,
	.funcs = &psp_ip_funcs,
};