/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Huang Rui
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "amdgpu_xgmi.h"
#include "soc15_common.h"
#include "psp_v3_1.h"
#include "psp_v10_0.h"
#include "psp_v11_0.h"
#include "psp_v11_0_8.h"
#include "psp_v12_0.h"
#include "psp_v13_0.h"
#include "psp_v13_0_4.h"
#include "psp_v14_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_securedisplay.h"
#include "amdgpu_atomfirmware.h"

#define AMD_VBIOS_FILE_MAX_SIZE_B	(1024*1024*16)

static int psp_load_smu_fw(struct psp_context *psp);
static int psp_rap_terminate(struct psp_context *psp);
static int psp_securedisplay_terminate(struct psp_context *psp);

static int psp_ring_init(struct psp_context *psp,
			 enum psp_ring_type ring_type)
{
	int ret = 0;
	struct psp_ring *ring;
	struct amdgpu_device *adev = psp->adev;

	ring = &psp->km_ring;

	ring->ring_type = ring_type;

	/* allocate 4k Page of Local Frame Buffer memory for ring */
	ring->ring_size = 0x1000;
	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->firmware.rbuf,
				      &ring->ring_mem_mc_addr,
				      (void **)&ring->ring_mem);
	if (ret) {
		ring->ring_size = 0;
		return ret;
	}

	return 0;
}

/*
 * Due to DF Cstate management centralized to PMFW, the firmware
 * loading sequence will be updated as below:
 *   - Load KDB
 *   - Load SYS_DRV
 *   - Load tOS
 *   - Load PMFW
 *   - Setup TMR
 *   - Load other non-psp fw
 *   - Load ASD
 *   - Load XGMI/RAS/HDCP/DTM TA if any
 *
 * This new sequence is required for
 *   - Arcturus and onwards
 */
static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;

	if (amdgpu_sriov_vf(adev)) {
		psp->pmfw_centralized_cstate_management = false;
		return;
	}

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 7):
		psp->pmfw_centralized_cstate_management = true;
		break;
	default:
		psp->pmfw_centralized_cstate_management = false;
		break;
	}
}

static int psp_init_sriov_microcode(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;
	char ucode_prefix[30];
	int ret = 0;

	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 9):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 2):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		ret &= psp_init_ta_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 0):
		adev->virt.autoload_ucode_id = 0;
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 14):
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		ret &= psp_init_ta_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 10):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 12):
		ret = psp_init_ta_microcode(psp, ucode_prefix);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

static int psp_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct psp_context *psp = &adev->psp;

	psp->autoload_supported = true;
	psp->boot_time_tmr = true;

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
		psp_v3_1_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		psp_v10_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
		psp_v11_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 7):
		adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
		fallthrough;
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		psp_v11_0_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		psp_v12_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(13, 0, 2):
		psp->boot_time_tmr = false;
		fallthrough;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 14):
		psp_v13_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case IP_VERSION(13, 0, 12):
		psp_v13_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
	case IP_VERSION(14, 0, 4):
		psp_v13_0_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 8):
		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
			psp_v11_0_8_set_psp_funcs(psp);
		}
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 10):
		psp_v13_0_set_psp_funcs(psp);
		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(13, 0, 4):
		psp_v13_0_4_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
		psp_v14_0_set_psp_funcs(psp);
		break;
	case IP_VERSION(14, 0, 5):
		psp_v14_0_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	default:
		return -EINVAL;
	}

	psp->adev = adev;

	adev->psp_timeout = 20000;

	psp_check_pmfw_centralized_cstate_management(psp);

	if (amdgpu_sriov_vf(adev))
		return psp_init_sriov_microcode(psp);
	else
		return psp_init_microcode(psp);
}

void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
{
	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
			      &mem_ctx->shared_buf);
	mem_ctx->shared_bo = NULL;
}

static void psp_free_shared_bufs(struct psp_context *psp)
{
	void *tmr_buf;
	void **pptr;

	/* free TMR memory buffer */
	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
	psp->tmr_bo = NULL;

	/* free xgmi shared memory */
	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);

	/* free ras shared memory */
	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);

	/* free hdcp shared memory */
	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);

	/* free dtm shared memory */
	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);

	/* free rap shared memory */
	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);

	/* free securedisplay shared memory */
	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
}

static void psp_memory_training_fini(struct psp_context *psp)
{
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	kfree(ctx->sys_cache);
	ctx->sys_cache = NULL;
}

static int psp_memory_training_init(struct psp_context *psp)
{
	int ret;
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
		dev_dbg(psp->adev->dev, "memory training is not supported!\n");
		return 0;
	}

	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
	if (ctx->sys_cache == NULL) {
		dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
		ret = -ENOMEM;
		goto Err_out;
	}

	dev_dbg(psp->adev->dev,
		"train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
		ctx->train_data_size,
		ctx->p2c_train_data_offset,
		ctx->c2p_train_data_offset);
	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
	return 0;

Err_out:
	psp_memory_training_fini(psp);
	return ret;
}

/*
 * Helper function to query a psp runtime database entry
 *
 * @adev: amdgpu_device pointer
 * @entry_type: the type of psp runtime database entry
 * @db_entry: runtime database entry pointer
 *
 * Return false if the runtime database doesn't exist or the entry is invalid,
 * or true if the specific database entry is found and copied to @db_entry
 */
static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
				     enum psp_runtime_entry_type entry_type,
				     void *db_entry)
{
	uint64_t db_header_pos, db_dir_pos;
	struct psp_runtime_data_header db_header = {0};
	struct psp_runtime_data_directory db_dir = {0};
	bool ret = false;
	int i;

	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
		return false;

	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);

	/* read runtime db header from vram */
	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
				  sizeof(struct psp_runtime_data_header), false);

	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
		/* runtime db doesn't exist, exit */
		dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
		return false;
	}

	/* read runtime database entry from vram */
	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
				  sizeof(struct psp_runtime_data_directory), false);

	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
		/* invalid db entry count, exit */
		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
		return false;
	}

	/* look up for requested entry type */
	for (i = 0; i < db_dir.entry_count && !ret; i++) {
		if (db_dir.entry_list[i].entry_type == entry_type) {
			switch (entry_type) {
			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
					/* invalid db entry size */
					dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
					return false;
				}
				/* read runtime database entry */
				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
				ret = true;
				break;
			case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
					/* invalid db entry size */
					dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
					return false;
				}
				/* read runtime database entry */
				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
				ret = true;
				break;
			default:
				ret = false;
				break;
			}
		}
	}

	return ret;
}

static int psp_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct psp_context *psp = &adev->psp;
	int ret;
	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
	struct psp_runtime_scpm_entry scpm_entry;

	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!psp->cmd) {
		dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
		return -ENOMEM;
	}

	adev->psp.xgmi_context.supports_extended_data =
		!adev->gmc.xgmi.connected_to_cpu &&
		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);

	memset(&scpm_entry, 0, sizeof(scpm_entry));
	if ((psp_get_runtime_db_entry(adev,
				      PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
				      &scpm_entry)) &&
	    (scpm_entry.scpm_status != SCPM_DISABLE)) {
		adev->scpm_enabled = true;
		adev->scpm_status = scpm_entry.scpm_status;
	} else {
		adev->scpm_enabled = false;
		adev->scpm_status = SCPM_DISABLE;
	}

	/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */

	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
	if (psp_get_runtime_db_entry(adev,
				     PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
				     &boot_cfg_entry)) {
		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
		if ((psp->boot_cfg_bitmask) &
		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
			/* If psp runtime database exists, then
			 * only enable two stage memory training
			 * when TWO_STAGE_DRAM_TRAINING bit is set
			 * in runtime database
			 */
			mem_training_ctx->enable_mem_training = true;
		}

	} else {
		/* If psp runtime database doesn't exist or is
		 * invalid, force enable two stage memory training
		 */
		mem_training_ctx->enable_mem_training = true;
	}

	if (mem_training_ctx->enable_mem_training) {
		ret = psp_memory_training_init(psp);
		if (ret) {
			dev_err(adev->dev, "Failed to initialize memory training!\n");
			return ret;
		}

		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
		if (ret) {
			dev_err(adev->dev, "Failed to process memory training!\n");
			return ret;
		}
	}

	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->fw_pri_bo,
				      &psp->fw_pri_mc_addr,
				      &psp->fw_pri_buf);
	if (ret)
		return ret;

	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fence_buf_bo,
				      &psp->fence_buf_mc_addr,
				      &psp->fence_buf);
	if (ret)
		goto failed1;

	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
				      (void **)&psp->cmd_buf_mem);
	if (ret)
		goto failed2;

	return 0;

failed2:
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
failed1:
	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	return ret;
}

static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct psp_context *psp = &adev->psp;

	psp_memory_training_fini(psp);

	amdgpu_ucode_release(&psp->sos_fw);
	amdgpu_ucode_release(&psp->asd_fw);
	amdgpu_ucode_release(&psp->ta_fw);
	amdgpu_ucode_release(&psp->cap_fw);
	amdgpu_ucode_release(&psp->toc_fw);

	kfree(psp->cmd);
	psp->cmd = NULL;

	psp_free_shared_bufs(psp);

	if (psp->km_ring.ring_mem)
		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
				      &psp->km_ring.ring_mem_mc_addr,
				      (void **)&psp->km_ring.ring_mem);

	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
			      (void **)&psp->cmd_buf_mem);

	return 0;
}

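/*
 * Poll the PSP register at @reg_index once per microsecond, up to
 * adev->usec_timeout iterations, until its masked value equals @reg_val
 * (or, with PSP_WAITREG_CHANGED, until it differs from @reg_val).
 */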
int psp_wait_for(struct psp_context *psp, uint32_t reg_index, uint32_t reg_val,
		 uint32_t mask, uint32_t flags)
{
	bool check_changed = flags & PSP_WAITREG_CHANGED;
	bool verbose = !(flags & PSP_WAITREG_NOVERBOSE);
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	if (psp->adev->no_hw_access)
		return 0;

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32(reg_index);
		if (check_changed) {
			if (val != reg_val)
				return 0;
		} else {
			if ((val & mask) == reg_val)
				return 0;
		}
		udelay(1);
	}

	if (verbose)
		dev_err(adev->dev,
			"psp reg (0x%x) wait timed out, mask: %x, read: %x exp: %x",
			reg_index, mask, val, reg_val);

	return -ETIME;
}

int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
			       uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
{
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	if (psp->adev->no_hw_access)
		return 0;

	for (i = 0; i < msec_timeout; i++) {
		val = RREG32(reg_index);
		if ((val & mask) == reg_val)
			return 0;
		msleep(1);
	}

	return -ETIME;
}

static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
{
	switch (cmd_id) {
	case GFX_CMD_ID_LOAD_TA:
		return "LOAD_TA";
	case GFX_CMD_ID_UNLOAD_TA:
		return "UNLOAD_TA";
	case GFX_CMD_ID_INVOKE_CMD:
		return "INVOKE_CMD";
	case GFX_CMD_ID_LOAD_ASD:
		return "LOAD_ASD";
	case GFX_CMD_ID_SETUP_TMR:
		return "SETUP_TMR";
	case GFX_CMD_ID_LOAD_IP_FW:
		return "LOAD_IP_FW";
	case GFX_CMD_ID_DESTROY_TMR:
		return "DESTROY_TMR";
	case GFX_CMD_ID_SAVE_RESTORE:
		return "SAVE_RESTORE_IP_FW";
	case GFX_CMD_ID_SETUP_VMR:
		return "SETUP_VMR";
	case GFX_CMD_ID_DESTROY_VMR:
		return "DESTROY_VMR";
	case GFX_CMD_ID_PROG_REG:
		return "PROG_REG";
	case GFX_CMD_ID_GET_FW_ATTESTATION:
		return "GET_FW_ATTESTATION";
	case GFX_CMD_ID_LOAD_TOC:
		return "ID_LOAD_TOC";
	case GFX_CMD_ID_AUTOLOAD_RLC:
		return "AUTOLOAD_RLC";
	case GFX_CMD_ID_BOOT_CFG:
		return "BOOT_CFG";
	case GFX_CMD_ID_CONFIG_SQ_PERFMON:
		return "CONFIG_SQ_PERFMON";
	case GFX_CMD_ID_FB_FW_RESERV_ADDR:
		return "FB_FW_RESERV_ADDR";
	case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR:
		return "FB_FW_RESERV_EXT_ADDR";
	case GFX_CMD_ID_SRIOV_SPATIAL_PART:
		return "SPATIAL_PARTITION";
	case GFX_CMD_ID_FB_NPS_MODE:
		return "NPS_MODE_CHANGE";
	default:
		return "UNKNOWN CMD";
	}
}

static bool psp_err_warn(struct psp_context *psp)
{
	struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;

	/* This response indicates reg list is already loaded */
	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
	    cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
	    cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
	    cmd->resp.status == TEE_ERROR_CANCEL)
		return false;

	return true;
}

static int
psp_cmd_submit_buf(struct psp_context *psp,
		   struct amdgpu_firmware_info *ucode,
		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
	int ret;
	int index;
	int timeout = psp->adev->psp_timeout;
	bool ras_intr = false;
	bool skip_unsupport = false;

	if (psp->adev->no_hw_access)
		return 0;

	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);

	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));

	index = atomic_inc_return(&psp->fence_value);
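	/* Each submission gets a monotonically increasing fence index; completion
	 * is detected below when the PSP writes this index into the fence buffer.
	 */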
	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
	if (ret) {
		atomic_dec(&psp->fence_value);
		goto exit;
	}

	amdgpu_device_invalidate_hdp(psp->adev, NULL);
	while (*((unsigned int *)psp->fence_buf) != index) {
		if (--timeout == 0)
			break;
		/*
		 * Shouldn't wait for timeout when err_event_athub occurs,
		 * because gpu reset thread triggered and lock resource should
		 * be released for psp resume sequence.
		 */
		ras_intr = amdgpu_ras_intr_triggered();
		if (ras_intr)
			break;
		usleep_range(10, 100);
		amdgpu_device_invalidate_hdp(psp->adev, NULL);
	}

	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
			  psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);

	memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));

	/* In some cases, the psp response status is not 0 even though there is no
	 * problem while the command is submitted. Some versions of PSP FW
	 * don't write 0 to that field.
	 * So here we only print a warning instead of an error during psp
	 * initialization to avoid breaking hw_init, and we don't return
	 * -EINVAL.
	 */
	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
		if (ucode)
			dev_warn(psp->adev->dev,
				 "failed to load ucode %s(0x%X) ",
				 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
		if (psp_err_warn(psp))
			dev_warn(
				psp->adev->dev,
				"psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
				psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
				psp->cmd_buf_mem->cmd_id,
				psp->cmd_buf_mem->resp.status);
		/* If any firmware (including CAP) load fails under SRIOV, it should
		 * return failure to stop the VF from initializing.
		 * Also return failure in case of timeout
		 */
		if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
			ret = -EINVAL;
			goto exit;
		}
	}

	if (ucode) {
		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
	}

exit:
	return ret;
}

static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
{
	struct psp_gfx_cmd_resp *cmd = psp->cmd;

	mutex_lock(&psp->mutex);

	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));

	return cmd;
}

static void release_psp_cmd_buf(struct psp_context *psp)
{
	mutex_unlock(&psp->mutex);
}

static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
				 struct psp_gfx_cmd_resp *cmd,
				 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
{
	struct amdgpu_device *adev = psp->adev;
	uint32_t size = 0;
	uint64_t tmr_pa = 0;

	if (tmr_bo) {
		size = amdgpu_bo_size(tmr_bo);
		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
	}

	if (amdgpu_sriov_vf(psp->adev))
		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_size = size;
	cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
	cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
	cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
}

static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint64_t pri_buf_mc, uint32_t size)
{
	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_size = size;
}

/* Issue the LOAD TOC cmd to the PSP to parse the toc and calculate the tmr size needed */
static int psp_load_toc(struct psp_context *psp,
			uint32_t *tmr_size)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	/* Copy toc to psp firmware private buffer */
	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);

	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (!ret)
		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;

	release_psp_cmd_buf(psp);

	return ret;
}

/* Set up Trusted Memory Region */
static int psp_tmr_init(struct psp_context *psp)
{
	int ret = 0;
	int tmr_size;
	void *tmr_buf;
	void **pptr;

	/*
	 * According to the HW engineers, they prefer the TMR address to be
	 * "naturally aligned", e.g. the start address is an integer multiple
	 * of the TMR size.
	 *
	 * Note: this memory needs to be reserved till the driver
	 * uninitializes.
	 */
	tmr_size = PSP_TMR_SIZE(psp->adev);

	/* For ASICs that support RLC autoload, the psp will parse the toc
	 * and calculate the total size of TMR needed
	 */
	if (!amdgpu_sriov_vf(psp->adev) &&
	    psp->toc.start_addr &&
	    psp->toc.size_bytes &&
	    psp->fw_pri_buf) {
		ret = psp_load_toc(psp, &tmr_size);
		if (ret) {
			dev_err(psp->adev->dev, "Failed to load toc\n");
			return ret;
		}
	}

	if (!psp->tmr_bo && !psp->boot_time_tmr) {
		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
					      PSP_TMR_ALIGNMENT,
					      AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM,
					      &psp->tmr_bo, &psp->tmr_mc_addr,
					      pptr);
	}
	if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo)
		psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo);

	return ret;
}

static bool psp_skip_tmr(struct psp_context *psp)
{
	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 12):
	case IP_VERSION(13, 0, 14):
		return true;
	default:
		return false;
	}
}

static int psp_tmr_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
	 * Already set up by host driver.
	 */
	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
	if (psp->tmr_bo)
		dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
					struct psp_gfx_cmd_resp *cmd)
{
	if (amdgpu_sriov_vf(psp->adev))
		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
}

static int psp_tmr_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
	 * as TMR is not loaded at all
	 */
	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_tmr_unload_cmd_buf(psp, cmd);
	dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_tmr_terminate(struct psp_context *psp)
{
	return psp_tmr_unload(psp);
}

int psp_get_fw_attestation_records_addr(struct psp_context *psp,
					uint64_t *output_ptr)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	if (!output_ptr)
		return -EINVAL;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	if (!ret) {
		*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
			      ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
	}

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_get_fw_reservation_info(struct psp_context *psp,
				       uint32_t cmd_id,
				       uint64_t *addr,
				       uint32_t *size)
{
	int ret;
	uint32_t status;
	struct psp_gfx_cmd_resp *cmd;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = cmd_id;

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (ret) {
		release_psp_cmd_buf(psp);
		return ret;
	}

	status = cmd->resp.status;
	if (status == PSP_ERR_UNKNOWN_COMMAND) {
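		/* command not supported by this PSP firmware: report no reservation */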
		release_psp_cmd_buf(psp);
		*addr = 0;
		*size = 0;
		return 0;
	}

	*addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 |
		cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo;
	*size = cmd->resp.uresp.fw_reserve_info.reserve_size;

	release_psp_cmd_buf(psp);

	return 0;
}

int psp_update_fw_reservation(struct psp_context *psp)
{
	int ret;
	uint64_t reserv_addr, reserv_addr_ext;
	uint32_t reserv_size, reserv_size_ext, mp0_ip_ver;
	struct amdgpu_device *adev = psp->adev;

	mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0);

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	switch (mp0_ip_ver) {
	case IP_VERSION(14, 0, 2):
		if (adev->psp.sos.fw_version < 0x3b0e0d)
			return 0;
		break;

	case IP_VERSION(14, 0, 3):
		if (adev->psp.sos.fw_version < 0x3a0e14)
			return 0;
		break;

	default:
		return 0;
	}

	ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
	if (ret)
		return ret;
	ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext);
	if (ret)
		return ret;

	if (reserv_addr != adev->gmc.real_vram_size - reserv_size) {
		dev_warn(adev->dev, "reserve fw region is not valid!\n");
		return 0;
	}

	amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);

	reserv_size = roundup(reserv_size, SZ_1M);

	ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL);
	if (ret) {
		dev_err(adev->dev, "reserve fw region failed(%d)!\n", ret);
		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
		return ret;
	}

	reserv_size_ext = roundup(reserv_size_ext, SZ_1M);

	ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext,
					 &adev->mman.fw_reserved_memory_extend, NULL);
	if (ret) {
		dev_err(adev->dev, "reserve extend fw region failed(%d)!\n", ret);
		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL);
		return ret;
	}

	return 0;
}

static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
{
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (!ret) {
		*boot_cfg =
			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
	}

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
{
	int ret;
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;

	if (amdgpu_sriov_vf(adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
	cmd->cmd.boot_cfg.boot_config = boot_cfg;
	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_rl_load(struct amdgpu_device *adev)
{
	int ret;
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;

	if (!is_psp_fw_valid(psp->rl))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);

	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
	cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_memory_partition(struct psp_context *psp, int mode)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE;
	cmd->cmd.cmd_memory_part.mode = mode;

	dev_info(psp->adev->dev,
		 "Requesting %d memory partition change through PSP", mode);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (ret)
		dev_err(psp->adev->dev,
			"PSP request failed to change to NPS%d mode\n", mode);

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_spatial_partition(struct psp_context *psp, int mode)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
	cmd->cmd.cmd_spatial_part.mode = mode;

	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_asd_initialize(struct psp_context *psp)
{
	int ret;

	/* If the PSP version doesn't match the ASD version, ASD loading will fail.
	 * Add a workaround to bypass it for SRIOV for now.
	 * TODO: add version check to make it common
	 */
	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
		return 0;

	/* bypass asd if display hardware is not available */
	if (!amdgpu_device_has_display_hardware(psp->adev) &&
	    amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
		return 0;

	psp->asd_context.mem_context.shared_mc_addr = 0;
	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
	psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;

	ret = psp_ta_load(psp, &psp->asd_context);
	if (!ret)
		psp->asd_context.initialized = true;

	return ret;
}

static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
	cmd->cmd.cmd_unload_ta.session_id = session_id;
}

int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_asd_terminate(struct psp_context *psp)
{
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->asd_context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->asd_context);
	if (!ret)
		psp->asd_context.initialized = false;

	return ret;
}

static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint32_t id, uint32_t value)
{
	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
}

int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
		    uint32_t value)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret = 0;

	if (reg >= PSP_REG_LAST)
		return -EINVAL;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (ret)
		dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);

	release_psp_cmd_buf(psp);

	return ret;
}

static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				     uint64_t ta_bin_mc,
				     struct ta_context *context)
{
	cmd->cmd_id = context->ta_load_type;
	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;

	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
		lower_32_bits(context->mem_context.shared_mc_addr);
	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
		upper_32_bits(context->mem_context.shared_mc_addr);
	cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
}

int psp_ta_init_shared_buf(struct psp_context *psp,
			   struct ta_mem_context *mem_ctx)
{
	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for ta to host memory
	 */
	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT,
				       &mem_ctx->shared_bo,
				       &mem_ctx->shared_mc_addr,
				       &mem_ctx->shared_buf);
}

static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t ta_cmd_id,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
	cmd->cmd.cmd_invoke_cmd.session_id = session_id;
	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
}

int psp_ta_invoke(struct psp_context *psp,
		  uint32_t ta_cmd_id,
		  struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_ta_load(struct psp_context *psp, struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = acquire_psp_cmd_buf(psp);

	psp_copy_fw(psp, context->bin_desc.start_addr,
		    context->bin_desc.size_bytes);

	if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) &&
	    context->mem_context.shared_bo)
		context->mem_context.shared_mc_addr =
			amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo);

	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	if (!ret)
		context->session_id = cmd->resp.session_id;

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
}

int psp_xgmi_terminate(struct psp_context *psp)
{
	int ret;
	struct amdgpu_device *adev = psp->adev;

	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
	     adev->gmc.xgmi.connected_to_cpu))
		return 0;

	if (!psp->xgmi_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->xgmi_context.context);

	psp->xgmi_context.context.initialized = false;

	return ret;
}

int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	if (!psp->ta_fw ||
	    !psp->xgmi_context.context.bin_desc.size_bytes ||
	    !psp->xgmi_context.context.bin_desc.start_addr)
		return -ENOENT;

	if (!load_ta)
		goto invoke;

	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	if (!psp->xgmi_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
		if (ret)
			return ret;
	}

	/* Load XGMI TA */
	ret = psp_ta_load(psp, &psp->xgmi_context.context);
	if (!ret)
		psp->xgmi_context.context.initialized = true;
	else
		return ret;

invoke:
	/* Initialize XGMI session */
	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
	xgmi_cmd->flag_extend_link_record = set_extended_data;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;

	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	/* note down the capability flag for XGMI TA */
	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;

	return ret;
}

int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;

	/* Invoke xgmi ta to get hive id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	if (ret)
		return ret;

	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;

	return 0;
}

int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;

	/* Invoke xgmi ta to get the node id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	if (ret)
		return ret;

	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;

	return 0;
}

static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
{
	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
			IP_VERSION(13, 0, 2) &&
		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
	       amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
			IP_VERSION(13, 0, 6);
}

/*
 * Chips that support extended topology information require the driver to
 * reflect topology information in the opposite direction. This is
 * because the TA has already exceeded its link record limit and if the
 * TA holds bi-directional information, the driver would have to do
 * multiple fetches instead of just two.
 */
static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
					   struct psp_xgmi_node_info node_info)
{
	struct amdgpu_device *mirror_adev;
	struct amdgpu_hive_info *hive;
	uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
	uint64_t dst_node_id = node_info.node_id;
	uint8_t dst_num_hops = node_info.num_hops;
	uint8_t dst_num_links = node_info.num_links;

	hive = amdgpu_get_xgmi_hive(psp->adev);
	if (WARN_ON(!hive))
		return;

	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
		struct psp_xgmi_topology_info *mirror_top_info;
		int j;

		if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
			continue;

		mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
		for (j = 0; j < mirror_top_info->num_nodes; j++) {
			if (mirror_top_info->nodes[j].node_id != src_node_id)
				continue;

			mirror_top_info->nodes[j].num_hops = dst_num_hops;
			/*
			 * prevent 0 num_links value re-reflection since reflection
			 * criteria is based on num_hops (direct or indirect).
			 */
			if (dst_num_links)
				mirror_top_info->nodes[j].num_links = dst_num_links;

			break;
		}

		break;
	}

	amdgpu_put_xgmi_hive(hive);
}

int psp_xgmi_get_topology_info(struct psp_context *psp,
			       int number_devices,
			       struct psp_xgmi_topology_info *topology,
			       bool get_extended_data)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
	int i;
	int ret;

	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
	xgmi_cmd->flag_extend_link_record = get_extended_data;

	/* Fill in the shared memory with topology information as input */
	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
	topology_info_input->num_nodes = number_devices;

	for (i = 0; i < topology_info_input->num_nodes; i++) {
		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
	}

	/* Invoke xgmi ta to get the topology information */
	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
	if (ret)
		return ret;

	/* Read the output topology information from the shared memory */
	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
	for (i = 0; i < topology->num_nodes; i++) {
		/* extended data will either be 0 or equal to non-extended data */
		if (topology_info_output->nodes[i].num_hops)
			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;

		/* non-extended data gets everything here so no need to update */
		if (!get_extended_data) {
			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
			topology->nodes[i].is_sharing_enabled =
				topology_info_output->nodes[i].is_sharing_enabled;
			topology->nodes[i].sdma_engine =
				topology_info_output->nodes[i].sdma_engine;
		}
	}

	/* Invoke xgmi ta again to get the link information */
	if (psp_xgmi_peer_link_info_supported(psp)) {
		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
		bool requires_reflection =
			(psp->xgmi_context.supports_extended_data &&
			 get_extended_data) ||
			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
				IP_VERSION(13, 0, 6) ||
			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
				IP_VERSION(13, 0, 14);
		bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
			psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;

		/* populate the shared output buffer rather than the cmd input buffer
		 * with node_ids as the input for GET_PEER_LINKS command execution.
		 * This is required for GET_PEER_LINKS per xgmi ta implementation.
		 * The same requirement applies to the GET_EXTEND_PEER_LINKS command.
		 */
		if (ta_port_num_support) {
			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;

			for (i = 0; i < topology->num_nodes; i++)
				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;

			link_extend_info_output->num_nodes = topology->num_nodes;
			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
		} else {
			link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;

			for (i = 0; i < topology->num_nodes; i++)
				link_info_output->nodes[i].node_id = topology->nodes[i].node_id;

			link_info_output->num_nodes = topology->num_nodes;
			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
		}

		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
		if (ret)
			return ret;

		for (i = 0; i < topology->num_nodes; i++) {
			uint8_t node_num_links = ta_port_num_support ?
				link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
			/* accumulate num_links on extended data */
			if (get_extended_data) {
				topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
			} else {
				topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
						topology->nodes[i].num_links : node_num_links;
			}
			/* populate the connected port num info if supported and available */
			if (ta_port_num_support && topology->nodes[i].num_links) {
				memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
				       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
			}

			/* reflect the topology information for bi-directionality */
			if (requires_reflection && topology->nodes[i].num_hops)
				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
		}
	}

	return 0;
}

int psp_xgmi_set_topology_info(struct psp_context *psp,
			       int number_devices,
			       struct psp_xgmi_topology_info *topology)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	int i;

	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
	topology_info_input->num_nodes = number_devices;

	for (i = 0; i < topology_info_input->num_nodes; i++) {
		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
		topology_info_input->nodes[i].is_sharing_enabled = 1;
		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
	}

	/* Invoke xgmi ta to set topology information */
	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
}

// ras begin
static void psp_ras_ta_check_status(struct psp_context *psp)
{
	struct ta_ras_shared_memory *ras_cmd =
		(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;

	switch (ras_cmd->ras_status) {
	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: cmd failed due to unsupported ip\n");
		break;
	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: cmd failed due to unsupported error injection\n");
		break;
	case TA_RAS_STATUS__SUCCESS:
		break;
	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
			dev_warn(psp->adev->dev,
				 "RAS WARNING: Inject error to critical region is not allowed\n");
		break;
	default:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
		break;
	}
}

static int psp_ras_send_cmd(struct psp_context *psp,
			    enum ras_command cmd_id, void *in, void *out)
{
	struct ta_ras_shared_memory *ras_cmd;
	uint32_t cmd = cmd_id;
	int ret = 0;

	if (!in)
		return -EINVAL;

	mutex_lock(&psp->ras_context.mutex);
	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	switch (cmd) {
	case TA_RAS_COMMAND__ENABLE_FEATURES:
	case TA_RAS_COMMAND__DISABLE_FEATURES:
		memcpy(&ras_cmd->ras_in_message,
		       in, sizeof(ras_cmd->ras_in_message));
		break;
	case TA_RAS_COMMAND__TRIGGER_ERROR:
		memcpy(&ras_cmd->ras_in_message.trigger_error,
		       in, sizeof(ras_cmd->ras_in_message.trigger_error));
		break;
	case TA_RAS_COMMAND__QUERY_ADDRESS:
		memcpy(&ras_cmd->ras_in_message.address,
		       in, sizeof(ras_cmd->ras_in_message.address));
		break;
	default:
		dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
		ret = -EINVAL;
		goto err_out;
	}

	ras_cmd->cmd_id = cmd;
	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);

	switch (cmd) {
	case TA_RAS_COMMAND__TRIGGER_ERROR:
		if (!ret && out)
			memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
		break;
	case TA_RAS_COMMAND__QUERY_ADDRESS:
		if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
			ret = -EINVAL;
		else if (out)
			memcpy(out,
			       &ras_cmd->ras_out_message.address,
			       sizeof(ras_cmd->ras_out_message.address));
		break;
	default:
		break;
	}

err_out:
	mutex_unlock(&psp->ras_context.mutex);

	return ret;
}

int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);

	if (amdgpu_ras_intr_triggered())
		return ret;

	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
		dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
		return -EINVAL;
	}

	if (!ret) {
		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
			dev_warn(psp->adev->dev, "ECC switch disabled\n");

			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
			dev_warn(psp->adev->dev,
				 "RAS internal register access blocked\n");

		psp_ras_ta_check_status(psp);
	}

	return ret;
}

int psp_ras_enable_features(struct psp_context *psp,
			    union ta_ras_cmd_input *info, bool enable)
{
	enum ras_command cmd_id;
	int ret;

	if (!psp->ras_context.context.initialized ||
!info) 1860 return -EINVAL; 1861 1862 cmd_id = enable ? 1863 TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES; 1864 ret = psp_ras_send_cmd(psp, cmd_id, info, NULL); 1865 if (ret) 1866 return -EINVAL; 1867 1868 return 0; 1869 } 1870 1871 int psp_ras_terminate(struct psp_context *psp) 1872 { 1873 int ret; 1874 1875 /* 1876 * TODO: bypass the terminate in sriov for now 1877 */ 1878 if (amdgpu_sriov_vf(psp->adev)) 1879 return 0; 1880 1881 if (!psp->ras_context.context.initialized) 1882 return 0; 1883 1884 ret = psp_ta_unload(psp, &psp->ras_context.context); 1885 1886 psp->ras_context.context.initialized = false; 1887 1888 mutex_destroy(&psp->ras_context.mutex); 1889 1890 return ret; 1891 } 1892 1893 int psp_ras_initialize(struct psp_context *psp) 1894 { 1895 int ret; 1896 uint32_t boot_cfg = 0xFF; 1897 struct amdgpu_device *adev = psp->adev; 1898 struct ta_ras_shared_memory *ras_cmd; 1899 1900 /* 1901 * TODO: bypass the initialize in sriov for now 1902 */ 1903 if (amdgpu_sriov_vf(adev)) 1904 return 0; 1905 1906 if (!adev->psp.ras_context.context.bin_desc.size_bytes || 1907 !adev->psp.ras_context.context.bin_desc.start_addr) { 1908 dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n"); 1909 return 0; 1910 } 1911 1912 if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) { 1913 /* query GECC enablement status from boot config 1914 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled 1915 */ 1916 ret = psp_boot_config_get(adev, &boot_cfg); 1917 if (ret) 1918 dev_warn(adev->dev, "PSP get boot config failed\n"); 1919 1920 if (boot_cfg == 1 && !adev->ras_default_ecc_enabled && 1921 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) { 1922 dev_warn(adev->dev, "GECC is currently enabled, which may affect performance\n"); 1923 dev_warn(adev->dev, 1924 "To disable GECC, please reboot the system and load the amdgpu driver with the parameter amdgpu_ras_enable=0\n"); 1925 } else { 1926 if ((adev->ras_default_ecc_enabled || amdgpu_ras_enable == 1) && 1927 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) { 1928 if (boot_cfg == 1) { 1929 dev_info(adev->dev, "GECC is enabled\n"); 1930 } else { 1931 /* enable GECC in next boot cycle if it is disabled 1932 * in boot config, or force enable GECC if failed to 1933 * get boot configuration 1934 */ 1935 ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC); 1936 if (ret) 1937 dev_warn(adev->dev, "PSP set boot config failed\n"); 1938 else 1939 dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n"); 1940 } 1941 } else { 1942 if (!boot_cfg) { 1943 if (!adev->ras_default_ecc_enabled && 1944 amdgpu_ras_enable != 1 && 1945 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) 1946 dev_warn(adev->dev, "GECC is disabled, set amdgpu_ras_enable=1 to enable GECC in next boot cycle if needed\n"); 1947 else 1948 dev_info(adev->dev, "GECC is disabled\n"); 1949 } else { 1950 /* disable GECC in next boot cycle if ras is 1951 * disabled by module parameter amdgpu_ras_enable 1952 * and/or amdgpu_ras_mask, or boot_config_get call 1953 * is failed 1954 */ 1955 ret = psp_boot_config_set(adev, 0); 1956 if (ret) 1957 dev_warn(adev->dev, "PSP set boot config failed\n"); 1958 else 1959 dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n"); 1960 } 1961 } 1962 } 1963 } 1964 1965 psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE; 1966 psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1967 1968 if 
(!psp->ras_context.context.mem_context.shared_buf) { 1969 ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context); 1970 if (ret) 1971 return ret; 1972 } 1973 1974 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1975 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1976 1977 if (amdgpu_ras_is_poison_mode_supported(adev)) 1978 ras_cmd->ras_in_message.init_flags.poison_mode_en = 1; 1979 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) 1980 ras_cmd->ras_in_message.init_flags.dgpu_mode = 1; 1981 ras_cmd->ras_in_message.init_flags.xcc_mask = 1982 adev->gfx.xcc_mask; 1983 ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2; 1984 if (adev->gmc.gmc_funcs->query_mem_partition_mode) 1985 ras_cmd->ras_in_message.init_flags.nps_mode = 1986 adev->gmc.gmc_funcs->query_mem_partition_mode(adev); 1987 ras_cmd->ras_in_message.init_flags.active_umc_mask = adev->umc.active_mask; 1988 1989 ret = psp_ta_load(psp, &psp->ras_context.context); 1990 1991 if (!ret && !ras_cmd->ras_status) { 1992 psp->ras_context.context.initialized = true; 1993 mutex_init(&psp->ras_context.mutex); 1994 } else { 1995 if (ras_cmd->ras_status) 1996 dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status); 1997 1998 /* fail to load RAS TA */ 1999 psp->ras_context.context.initialized = false; 2000 } 2001 2002 return ret; 2003 } 2004 2005 int psp_ras_trigger_error(struct psp_context *psp, 2006 struct ta_ras_trigger_error_input *info, uint32_t instance_mask) 2007 { 2008 struct amdgpu_device *adev = psp->adev; 2009 int ret; 2010 uint32_t dev_mask; 2011 uint32_t ras_status = 0; 2012 2013 if (!psp->ras_context.context.initialized || !info) 2014 return -EINVAL; 2015 2016 switch (info->block_id) { 2017 case TA_RAS_BLOCK__GFX: 2018 dev_mask = GET_MASK(GC, instance_mask); 2019 break; 2020 case TA_RAS_BLOCK__SDMA: 2021 dev_mask = GET_MASK(SDMA0, instance_mask); 2022 break; 2023 case TA_RAS_BLOCK__VCN: 2024 case TA_RAS_BLOCK__JPEG: 2025 dev_mask = GET_MASK(VCN, instance_mask); 2026 break; 2027 default: 2028 dev_mask = instance_mask; 2029 break; 2030 } 2031 2032 /* reuse sub_block_index for backward compatibility */ 2033 dev_mask <<= AMDGPU_RAS_INST_SHIFT; 2034 dev_mask &= AMDGPU_RAS_INST_MASK; 2035 info->sub_block_index |= dev_mask; 2036 2037 ret = psp_ras_send_cmd(psp, 2038 TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status); 2039 if (ret) 2040 return -EINVAL; 2041 2042 /* If err_event_athub occurs, error injection was successful; however, 2043 * the return status from the TA is no longer reliable 2044 */ 2045 if (amdgpu_ras_intr_triggered()) 2046 return 0; 2047 2048 if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED) 2049 return -EACCES; 2050 else if (ras_status) 2051 return -EINVAL; 2052 2053 return 0; 2054 } 2055 2056 int psp_ras_query_address(struct psp_context *psp, 2057 struct ta_ras_query_address_input *addr_in, 2058 struct ta_ras_query_address_output *addr_out) 2059 { 2060 int ret; 2061 2062 if (!psp->ras_context.context.initialized || 2063 !addr_in || !addr_out) 2064 return -EINVAL; 2065 2066 ret = psp_ras_send_cmd(psp, 2067 TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out); 2068 2069 return ret; 2070 } 2071 // ras end 2072 2073 // HDCP start 2074 static int psp_hdcp_initialize(struct psp_context *psp) 2075 { 2076 int ret; 2077 2078 /* 2079 * TODO: bypass the initialize in sriov for now 2080 */ 2081 if (amdgpu_sriov_vf(psp->adev)) 2082 return 0; 2083 2084 /* bypass hdcp initialization if dmu is harvested */ 2085 if
(!amdgpu_device_has_display_hardware(psp->adev)) 2086 return 0; 2087 2088 if (!psp->hdcp_context.context.bin_desc.size_bytes || 2089 !psp->hdcp_context.context.bin_desc.start_addr) { 2090 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n"); 2091 return 0; 2092 } 2093 2094 psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE; 2095 psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2096 2097 if (!psp->hdcp_context.context.mem_context.shared_buf) { 2098 ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context); 2099 if (ret) 2100 return ret; 2101 } 2102 2103 ret = psp_ta_load(psp, &psp->hdcp_context.context); 2104 if (!ret) { 2105 psp->hdcp_context.context.initialized = true; 2106 mutex_init(&psp->hdcp_context.mutex); 2107 } 2108 2109 return ret; 2110 } 2111 2112 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2113 { 2114 /* 2115 * TODO: bypass the loading in sriov for now 2116 */ 2117 if (amdgpu_sriov_vf(psp->adev)) 2118 return 0; 2119 2120 if (!psp->hdcp_context.context.initialized) 2121 return 0; 2122 2123 return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context); 2124 } 2125 2126 static int psp_hdcp_terminate(struct psp_context *psp) 2127 { 2128 int ret; 2129 2130 /* 2131 * TODO: bypass the terminate in sriov for now 2132 */ 2133 if (amdgpu_sriov_vf(psp->adev)) 2134 return 0; 2135 2136 if (!psp->hdcp_context.context.initialized) 2137 return 0; 2138 2139 ret = psp_ta_unload(psp, &psp->hdcp_context.context); 2140 2141 psp->hdcp_context.context.initialized = false; 2142 2143 return ret; 2144 } 2145 // HDCP end 2146 2147 // DTM start 2148 static int psp_dtm_initialize(struct psp_context *psp) 2149 { 2150 int ret; 2151 2152 /* 2153 * TODO: bypass the initialize in sriov for now 2154 */ 2155 if (amdgpu_sriov_vf(psp->adev)) 2156 return 0; 2157 2158 /* bypass dtm initialization if dmu is harvested */ 2159 if (!amdgpu_device_has_display_hardware(psp->adev)) 2160 return 0; 2161 2162 if (!psp->dtm_context.context.bin_desc.size_bytes || 2163 !psp->dtm_context.context.bin_desc.start_addr) { 2164 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n"); 2165 return 0; 2166 } 2167 2168 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE; 2169 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2170 2171 if (!psp->dtm_context.context.mem_context.shared_buf) { 2172 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context); 2173 if (ret) 2174 return ret; 2175 } 2176 2177 ret = psp_ta_load(psp, &psp->dtm_context.context); 2178 if (!ret) { 2179 psp->dtm_context.context.initialized = true; 2180 mutex_init(&psp->dtm_context.mutex); 2181 } 2182 2183 return ret; 2184 } 2185 2186 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2187 { 2188 /* 2189 * TODO: bypass the loading in sriov for now 2190 */ 2191 if (amdgpu_sriov_vf(psp->adev)) 2192 return 0; 2193 2194 if (!psp->dtm_context.context.initialized) 2195 return 0; 2196 2197 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context); 2198 } 2199 2200 static int psp_dtm_terminate(struct psp_context *psp) 2201 { 2202 int ret; 2203 2204 /* 2205 * TODO: bypass the terminate in sriov for now 2206 */ 2207 if (amdgpu_sriov_vf(psp->adev)) 2208 return 0; 2209 2210 if (!psp->dtm_context.context.initialized) 2211 return 0; 2212 2213 ret = psp_ta_unload(psp, &psp->dtm_context.context); 2214 2215 psp->dtm_context.context.initialized = false; 2216 2217 return ret; 2218 } 2219 // DTM end 
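/*
 * The HDCP and DTM helpers above and the RAP and secure display helpers
 * below follow the same TA lifecycle as the RAS TA:
 *   psp_ta_init_shared_buf() -> psp_ta_load() -> psp_ta_invoke() -> psp_ta_unload()
 * The shared command buffer is allocated on first use, each context carries
 * its own mutex to serialize users of that buffer, and initialization is
 * skipped under SR-IOV or when the optional TA binary is not packaged in
 * the ucode file.
 */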
2220 2221 // RAP start 2222 static int psp_rap_initialize(struct psp_context *psp) 2223 { 2224 int ret; 2225 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS; 2226 2227 /* 2228 * TODO: bypass the initialize in sriov for now 2229 */ 2230 if (amdgpu_sriov_vf(psp->adev)) 2231 return 0; 2232 2233 if (!psp->rap_context.context.bin_desc.size_bytes || 2234 !psp->rap_context.context.bin_desc.start_addr) { 2235 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n"); 2236 return 0; 2237 } 2238 2239 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE; 2240 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2241 2242 if (!psp->rap_context.context.mem_context.shared_buf) { 2243 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context); 2244 if (ret) 2245 return ret; 2246 } 2247 2248 ret = psp_ta_load(psp, &psp->rap_context.context); 2249 if (!ret) { 2250 psp->rap_context.context.initialized = true; 2251 mutex_init(&psp->rap_context.mutex); 2252 } else 2253 return ret; 2254 2255 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status); 2256 if (ret || status != TA_RAP_STATUS__SUCCESS) { 2257 psp_rap_terminate(psp); 2258 /* free rap shared memory */ 2259 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); 2260 2261 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n", 2262 ret, status); 2263 2264 return ret; 2265 } 2266 2267 return 0; 2268 } 2269 2270 static int psp_rap_terminate(struct psp_context *psp) 2271 { 2272 int ret; 2273 2274 if (!psp->rap_context.context.initialized) 2275 return 0; 2276 2277 ret = psp_ta_unload(psp, &psp->rap_context.context); 2278 2279 psp->rap_context.context.initialized = false; 2280 2281 return ret; 2282 } 2283 2284 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status) 2285 { 2286 struct ta_rap_shared_memory *rap_cmd; 2287 int ret = 0; 2288 2289 if (!psp->rap_context.context.initialized) 2290 return 0; 2291 2292 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE && 2293 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0) 2294 return -EINVAL; 2295 2296 mutex_lock(&psp->rap_context.mutex); 2297 2298 rap_cmd = (struct ta_rap_shared_memory *) 2299 psp->rap_context.context.mem_context.shared_buf; 2300 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory)); 2301 2302 rap_cmd->cmd_id = ta_cmd_id; 2303 rap_cmd->validation_method_id = METHOD_A; 2304 2305 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context); 2306 if (ret) 2307 goto out_unlock; 2308 2309 if (status) 2310 *status = rap_cmd->rap_status; 2311 2312 out_unlock: 2313 mutex_unlock(&psp->rap_context.mutex); 2314 2315 return ret; 2316 } 2317 // RAP end 2318 2319 /* securedisplay start */ 2320 static int psp_securedisplay_initialize(struct psp_context *psp) 2321 { 2322 int ret; 2323 struct ta_securedisplay_cmd *securedisplay_cmd; 2324 2325 /* 2326 * TODO: bypass the initialize in sriov for now 2327 */ 2328 if (amdgpu_sriov_vf(psp->adev)) 2329 return 0; 2330 2331 /* bypass securedisplay initialization if dmu is harvested */ 2332 if (!amdgpu_device_has_display_hardware(psp->adev)) 2333 return 0; 2334 2335 if (!psp->securedisplay_context.context.bin_desc.size_bytes || 2336 !psp->securedisplay_context.context.bin_desc.start_addr) { 2337 dev_info(psp->adev->dev, 2338 "SECUREDISPLAY: optional securedisplay ta ucode is not available\n"); 2339 return 0; 2340 } 2341 2342 psp->securedisplay_context.context.mem_context.shared_mem_size = 2343 PSP_SECUREDISPLAY_SHARED_MEM_SIZE; 2344 
psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2345 2346 if (!psp->securedisplay_context.context.initialized) { 2347 ret = psp_ta_init_shared_buf(psp, 2348 &psp->securedisplay_context.context.mem_context); 2349 if (ret) 2350 return ret; 2351 } 2352 2353 ret = psp_ta_load(psp, &psp->securedisplay_context.context); 2354 if (!ret) { 2355 psp->securedisplay_context.context.initialized = true; 2356 mutex_init(&psp->securedisplay_context.mutex); 2357 } else 2358 return ret; 2359 2360 mutex_lock(&psp->securedisplay_context.mutex); 2361 2362 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, 2363 TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2364 2365 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2366 2367 mutex_unlock(&psp->securedisplay_context.mutex); 2368 2369 if (ret) { 2370 psp_securedisplay_terminate(psp); 2371 /* free securedisplay shared memory */ 2372 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); 2373 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n"); 2374 return -EINVAL; 2375 } 2376 2377 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { 2378 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); 2379 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n", 2380 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret); 2381 /* don't try again */ 2382 psp->securedisplay_context.context.bin_desc.size_bytes = 0; 2383 } 2384 2385 return 0; 2386 } 2387 2388 static int psp_securedisplay_terminate(struct psp_context *psp) 2389 { 2390 int ret; 2391 2392 /* 2393 * TODO:bypass the terminate in sriov for now 2394 */ 2395 if (amdgpu_sriov_vf(psp->adev)) 2396 return 0; 2397 2398 if (!psp->securedisplay_context.context.initialized) 2399 return 0; 2400 2401 ret = psp_ta_unload(psp, &psp->securedisplay_context.context); 2402 2403 psp->securedisplay_context.context.initialized = false; 2404 2405 return ret; 2406 } 2407 2408 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2409 { 2410 int ret; 2411 2412 if (!psp->securedisplay_context.context.initialized) 2413 return -EINVAL; 2414 2415 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA && 2416 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC && 2417 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2) 2418 return -EINVAL; 2419 2420 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context); 2421 2422 return ret; 2423 } 2424 /* SECUREDISPLAY end */ 2425 2426 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev) 2427 { 2428 struct psp_context *psp = &adev->psp; 2429 int ret = 0; 2430 2431 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL) 2432 ret = psp->funcs->wait_for_bootloader(psp); 2433 2434 return ret; 2435 } 2436 2437 bool amdgpu_psp_get_ras_capability(struct psp_context *psp) 2438 { 2439 if (psp->funcs && 2440 psp->funcs->get_ras_capability) { 2441 return psp->funcs->get_ras_capability(psp); 2442 } else { 2443 return false; 2444 } 2445 } 2446 2447 bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev) 2448 { 2449 struct psp_context *psp = &adev->psp; 2450 2451 if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) 2452 return false; 2453 2454 if (psp->funcs && psp->funcs->is_reload_needed) 2455 return psp->funcs->is_reload_needed(psp); 2456 2457 return false; 2458 } 2459 2460 static void psp_update_gpu_addresses(struct amdgpu_device *adev) 2461 { 2462 struct psp_context *psp = &adev->psp; 2463 2464 
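/*
 * With XGMI migration enabled for the VF (see the caller in psp_hw_start()),
 * re-derive the PSP fw-private, fence and command buffer addresses and the
 * KM ring address from the framebuffer aperture (amdgpu_bo_fb_aper_addr),
 * so the PSP is handed aperture-based addresses instead of the original MC
 * addresses.
 */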
if (psp->cmd_buf_bo && psp->cmd_buf_mem) { 2465 psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo); 2466 psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo); 2467 psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo); 2468 } 2469 if (adev->firmware.rbuf && psp->km_ring.ring_mem) 2470 psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf); 2471 } 2472 2473 static int psp_hw_start(struct psp_context *psp) 2474 { 2475 struct amdgpu_device *adev = psp->adev; 2476 int ret; 2477 2478 if (amdgpu_virt_xgmi_migrate_enabled(adev)) 2479 psp_update_gpu_addresses(adev); 2480 2481 if (!amdgpu_sriov_vf(adev)) { 2482 if ((is_psp_fw_valid(psp->kdb)) && 2483 (psp->funcs->bootloader_load_kdb != NULL)) { 2484 ret = psp_bootloader_load_kdb(psp); 2485 if (ret) { 2486 dev_err(adev->dev, "PSP load kdb failed!\n"); 2487 return ret; 2488 } 2489 } 2490 2491 if ((is_psp_fw_valid(psp->spl)) && 2492 (psp->funcs->bootloader_load_spl != NULL)) { 2493 ret = psp_bootloader_load_spl(psp); 2494 if (ret) { 2495 dev_err(adev->dev, "PSP load spl failed!\n"); 2496 return ret; 2497 } 2498 } 2499 2500 if ((is_psp_fw_valid(psp->sys)) && 2501 (psp->funcs->bootloader_load_sysdrv != NULL)) { 2502 ret = psp_bootloader_load_sysdrv(psp); 2503 if (ret) { 2504 dev_err(adev->dev, "PSP load sys drv failed!\n"); 2505 return ret; 2506 } 2507 } 2508 2509 if ((is_psp_fw_valid(psp->soc_drv)) && 2510 (psp->funcs->bootloader_load_soc_drv != NULL)) { 2511 ret = psp_bootloader_load_soc_drv(psp); 2512 if (ret) { 2513 dev_err(adev->dev, "PSP load soc drv failed!\n"); 2514 return ret; 2515 } 2516 } 2517 2518 if ((is_psp_fw_valid(psp->intf_drv)) && 2519 (psp->funcs->bootloader_load_intf_drv != NULL)) { 2520 ret = psp_bootloader_load_intf_drv(psp); 2521 if (ret) { 2522 dev_err(adev->dev, "PSP load intf drv failed!\n"); 2523 return ret; 2524 } 2525 } 2526 2527 if ((is_psp_fw_valid(psp->dbg_drv)) && 2528 (psp->funcs->bootloader_load_dbg_drv != NULL)) { 2529 ret = psp_bootloader_load_dbg_drv(psp); 2530 if (ret) { 2531 dev_err(adev->dev, "PSP load dbg drv failed!\n"); 2532 return ret; 2533 } 2534 } 2535 2536 if ((is_psp_fw_valid(psp->ras_drv)) && 2537 (psp->funcs->bootloader_load_ras_drv != NULL)) { 2538 ret = psp_bootloader_load_ras_drv(psp); 2539 if (ret) { 2540 dev_err(adev->dev, "PSP load ras_drv failed!\n"); 2541 return ret; 2542 } 2543 } 2544 2545 if ((is_psp_fw_valid(psp->ipkeymgr_drv)) && 2546 (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) { 2547 ret = psp_bootloader_load_ipkeymgr_drv(psp); 2548 if (ret) { 2549 dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n"); 2550 return ret; 2551 } 2552 } 2553 2554 if ((is_psp_fw_valid(psp->spdm_drv)) && 2555 (psp->funcs->bootloader_load_spdm_drv != NULL)) { 2556 ret = psp_bootloader_load_spdm_drv(psp); 2557 if (ret) { 2558 dev_err(adev->dev, "PSP load spdm_drv failed!\n"); 2559 return ret; 2560 } 2561 } 2562 2563 if ((is_psp_fw_valid(psp->sos)) && 2564 (psp->funcs->bootloader_load_sos != NULL)) { 2565 ret = psp_bootloader_load_sos(psp); 2566 if (ret) { 2567 dev_err(adev->dev, "PSP load sos failed!\n"); 2568 return ret; 2569 } 2570 } 2571 } 2572 2573 ret = psp_ring_create(psp, PSP_RING_TYPE__KM); 2574 if (ret) { 2575 dev_err(adev->dev, "PSP create ring failed!\n"); 2576 return ret; 2577 } 2578 2579 if (!amdgpu_in_reset(adev) && !adev->in_suspend) { 2580 ret = psp_update_fw_reservation(psp); 2581 if (ret) { 2582 dev_err(adev->dev, "update fw reservation failed!\n"); 2583 return ret; 2584 } 2585 } 2586 2587 if (amdgpu_sriov_vf(adev) && 
amdgpu_in_reset(adev)) 2588 goto skip_pin_bo; 2589 2590 if (!psp->boot_time_tmr || psp->autoload_supported) { 2591 ret = psp_tmr_init(psp); 2592 if (ret) { 2593 dev_err(adev->dev, "PSP tmr init failed!\n"); 2594 return ret; 2595 } 2596 } 2597 2598 skip_pin_bo: 2599 /* 2600 * For ASICs with DF Cstate management centralized 2601 * to PMFW, TMR setup should be performed after PMFW 2602 * loaded and before other non-psp firmware loaded. 2603 */ 2604 if (psp->pmfw_centralized_cstate_management) { 2605 ret = psp_load_smu_fw(psp); 2606 if (ret) 2607 return ret; 2608 } 2609 2610 if (!psp->boot_time_tmr || !psp->autoload_supported) { 2611 ret = psp_tmr_load(psp); 2612 if (ret) { 2613 dev_err(adev->dev, "PSP load tmr failed!\n"); 2614 return ret; 2615 } 2616 } 2617 2618 return 0; 2619 } 2620 2621 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, 2622 enum psp_gfx_fw_type *type) 2623 { 2624 switch (ucode->ucode_id) { 2625 case AMDGPU_UCODE_ID_CAP: 2626 *type = GFX_FW_TYPE_CAP; 2627 break; 2628 case AMDGPU_UCODE_ID_SDMA0: 2629 *type = GFX_FW_TYPE_SDMA0; 2630 break; 2631 case AMDGPU_UCODE_ID_SDMA1: 2632 *type = GFX_FW_TYPE_SDMA1; 2633 break; 2634 case AMDGPU_UCODE_ID_SDMA2: 2635 *type = GFX_FW_TYPE_SDMA2; 2636 break; 2637 case AMDGPU_UCODE_ID_SDMA3: 2638 *type = GFX_FW_TYPE_SDMA3; 2639 break; 2640 case AMDGPU_UCODE_ID_SDMA4: 2641 *type = GFX_FW_TYPE_SDMA4; 2642 break; 2643 case AMDGPU_UCODE_ID_SDMA5: 2644 *type = GFX_FW_TYPE_SDMA5; 2645 break; 2646 case AMDGPU_UCODE_ID_SDMA6: 2647 *type = GFX_FW_TYPE_SDMA6; 2648 break; 2649 case AMDGPU_UCODE_ID_SDMA7: 2650 *type = GFX_FW_TYPE_SDMA7; 2651 break; 2652 case AMDGPU_UCODE_ID_CP_MES: 2653 *type = GFX_FW_TYPE_CP_MES; 2654 break; 2655 case AMDGPU_UCODE_ID_CP_MES_DATA: 2656 *type = GFX_FW_TYPE_MES_STACK; 2657 break; 2658 case AMDGPU_UCODE_ID_CP_MES1: 2659 *type = GFX_FW_TYPE_CP_MES_KIQ; 2660 break; 2661 case AMDGPU_UCODE_ID_CP_MES1_DATA: 2662 *type = GFX_FW_TYPE_MES_KIQ_STACK; 2663 break; 2664 case AMDGPU_UCODE_ID_CP_CE: 2665 *type = GFX_FW_TYPE_CP_CE; 2666 break; 2667 case AMDGPU_UCODE_ID_CP_PFP: 2668 *type = GFX_FW_TYPE_CP_PFP; 2669 break; 2670 case AMDGPU_UCODE_ID_CP_ME: 2671 *type = GFX_FW_TYPE_CP_ME; 2672 break; 2673 case AMDGPU_UCODE_ID_CP_MEC1: 2674 *type = GFX_FW_TYPE_CP_MEC; 2675 break; 2676 case AMDGPU_UCODE_ID_CP_MEC1_JT: 2677 *type = GFX_FW_TYPE_CP_MEC_ME1; 2678 break; 2679 case AMDGPU_UCODE_ID_CP_MEC2: 2680 *type = GFX_FW_TYPE_CP_MEC; 2681 break; 2682 case AMDGPU_UCODE_ID_CP_MEC2_JT: 2683 *type = GFX_FW_TYPE_CP_MEC_ME2; 2684 break; 2685 case AMDGPU_UCODE_ID_RLC_P: 2686 *type = GFX_FW_TYPE_RLC_P; 2687 break; 2688 case AMDGPU_UCODE_ID_RLC_V: 2689 *type = GFX_FW_TYPE_RLC_V; 2690 break; 2691 case AMDGPU_UCODE_ID_RLC_G: 2692 *type = GFX_FW_TYPE_RLC_G; 2693 break; 2694 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: 2695 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL; 2696 break; 2697 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: 2698 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM; 2699 break; 2700 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: 2701 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; 2702 break; 2703 case AMDGPU_UCODE_ID_RLC_IRAM: 2704 *type = GFX_FW_TYPE_RLC_IRAM; 2705 break; 2706 case AMDGPU_UCODE_ID_RLC_DRAM: 2707 *type = GFX_FW_TYPE_RLC_DRAM_BOOT; 2708 break; 2709 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS: 2710 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS; 2711 break; 2712 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS: 2713 *type = GFX_FW_TYPE_SE0_TAP_DELAYS; 2714 break; 2715 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS: 2716 *type = GFX_FW_TYPE_SE1_TAP_DELAYS; 
2717 break; 2718 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS: 2719 *type = GFX_FW_TYPE_SE2_TAP_DELAYS; 2720 break; 2721 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS: 2722 *type = GFX_FW_TYPE_SE3_TAP_DELAYS; 2723 break; 2724 case AMDGPU_UCODE_ID_SMC: 2725 *type = GFX_FW_TYPE_SMU; 2726 break; 2727 case AMDGPU_UCODE_ID_PPTABLE: 2728 *type = GFX_FW_TYPE_PPTABLE; 2729 break; 2730 case AMDGPU_UCODE_ID_UVD: 2731 *type = GFX_FW_TYPE_UVD; 2732 break; 2733 case AMDGPU_UCODE_ID_UVD1: 2734 *type = GFX_FW_TYPE_UVD1; 2735 break; 2736 case AMDGPU_UCODE_ID_VCE: 2737 *type = GFX_FW_TYPE_VCE; 2738 break; 2739 case AMDGPU_UCODE_ID_VCN: 2740 *type = GFX_FW_TYPE_VCN; 2741 break; 2742 case AMDGPU_UCODE_ID_VCN1: 2743 *type = GFX_FW_TYPE_VCN1; 2744 break; 2745 case AMDGPU_UCODE_ID_DMCU_ERAM: 2746 *type = GFX_FW_TYPE_DMCU_ERAM; 2747 break; 2748 case AMDGPU_UCODE_ID_DMCU_INTV: 2749 *type = GFX_FW_TYPE_DMCU_ISR; 2750 break; 2751 case AMDGPU_UCODE_ID_VCN0_RAM: 2752 *type = GFX_FW_TYPE_VCN0_RAM; 2753 break; 2754 case AMDGPU_UCODE_ID_VCN1_RAM: 2755 *type = GFX_FW_TYPE_VCN1_RAM; 2756 break; 2757 case AMDGPU_UCODE_ID_DMCUB: 2758 *type = GFX_FW_TYPE_DMUB; 2759 break; 2760 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0: 2761 case AMDGPU_UCODE_ID_SDMA_RS64: 2762 *type = GFX_FW_TYPE_SDMA_UCODE_TH0; 2763 break; 2764 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1: 2765 *type = GFX_FW_TYPE_SDMA_UCODE_TH1; 2766 break; 2767 case AMDGPU_UCODE_ID_IMU_I: 2768 *type = GFX_FW_TYPE_IMU_I; 2769 break; 2770 case AMDGPU_UCODE_ID_IMU_D: 2771 *type = GFX_FW_TYPE_IMU_D; 2772 break; 2773 case AMDGPU_UCODE_ID_CP_RS64_PFP: 2774 *type = GFX_FW_TYPE_RS64_PFP; 2775 break; 2776 case AMDGPU_UCODE_ID_CP_RS64_ME: 2777 *type = GFX_FW_TYPE_RS64_ME; 2778 break; 2779 case AMDGPU_UCODE_ID_CP_RS64_MEC: 2780 *type = GFX_FW_TYPE_RS64_MEC; 2781 break; 2782 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK: 2783 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK; 2784 break; 2785 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK: 2786 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK; 2787 break; 2788 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK: 2789 *type = GFX_FW_TYPE_RS64_ME_P0_STACK; 2790 break; 2791 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK: 2792 *type = GFX_FW_TYPE_RS64_ME_P1_STACK; 2793 break; 2794 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK: 2795 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK; 2796 break; 2797 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK: 2798 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK; 2799 break; 2800 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK: 2801 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK; 2802 break; 2803 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK: 2804 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK; 2805 break; 2806 case AMDGPU_UCODE_ID_VPE_CTX: 2807 *type = GFX_FW_TYPE_VPEC_FW1; 2808 break; 2809 case AMDGPU_UCODE_ID_VPE_CTL: 2810 *type = GFX_FW_TYPE_VPEC_FW2; 2811 break; 2812 case AMDGPU_UCODE_ID_VPE: 2813 *type = GFX_FW_TYPE_VPE; 2814 break; 2815 case AMDGPU_UCODE_ID_UMSCH_MM_UCODE: 2816 *type = GFX_FW_TYPE_UMSCH_UCODE; 2817 break; 2818 case AMDGPU_UCODE_ID_UMSCH_MM_DATA: 2819 *type = GFX_FW_TYPE_UMSCH_DATA; 2820 break; 2821 case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER: 2822 *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER; 2823 break; 2824 case AMDGPU_UCODE_ID_P2S_TABLE: 2825 *type = GFX_FW_TYPE_P2S_TABLE; 2826 break; 2827 case AMDGPU_UCODE_ID_JPEG_RAM: 2828 *type = GFX_FW_TYPE_JPEG_RAM; 2829 break; 2830 case AMDGPU_UCODE_ID_ISP: 2831 *type = GFX_FW_TYPE_ISP; 2832 break; 2833 case AMDGPU_UCODE_ID_MAXIMUM: 2834 default: 2835 return -EINVAL; 2836 } 2837 2838 return 0; 2839 } 2840 2841 static void psp_print_fw_hdr(struct psp_context *psp, 2842 struct 
amdgpu_firmware_info *ucode) 2843 { 2844 struct amdgpu_device *adev = psp->adev; 2845 struct common_firmware_header *hdr; 2846 2847 switch (ucode->ucode_id) { 2848 case AMDGPU_UCODE_ID_SDMA0: 2849 case AMDGPU_UCODE_ID_SDMA1: 2850 case AMDGPU_UCODE_ID_SDMA2: 2851 case AMDGPU_UCODE_ID_SDMA3: 2852 case AMDGPU_UCODE_ID_SDMA4: 2853 case AMDGPU_UCODE_ID_SDMA5: 2854 case AMDGPU_UCODE_ID_SDMA6: 2855 case AMDGPU_UCODE_ID_SDMA7: 2856 hdr = (struct common_firmware_header *) 2857 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; 2858 amdgpu_ucode_print_sdma_hdr(hdr); 2859 break; 2860 case AMDGPU_UCODE_ID_CP_CE: 2861 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data; 2862 amdgpu_ucode_print_gfx_hdr(hdr); 2863 break; 2864 case AMDGPU_UCODE_ID_CP_PFP: 2865 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data; 2866 amdgpu_ucode_print_gfx_hdr(hdr); 2867 break; 2868 case AMDGPU_UCODE_ID_CP_ME: 2869 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data; 2870 amdgpu_ucode_print_gfx_hdr(hdr); 2871 break; 2872 case AMDGPU_UCODE_ID_CP_MEC1: 2873 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data; 2874 amdgpu_ucode_print_gfx_hdr(hdr); 2875 break; 2876 case AMDGPU_UCODE_ID_RLC_G: 2877 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data; 2878 amdgpu_ucode_print_rlc_hdr(hdr); 2879 break; 2880 case AMDGPU_UCODE_ID_SMC: 2881 hdr = (struct common_firmware_header *)adev->pm.fw->data; 2882 amdgpu_ucode_print_smc_hdr(hdr); 2883 break; 2884 default: 2885 break; 2886 } 2887 } 2888 2889 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp, 2890 struct amdgpu_firmware_info *ucode, 2891 struct psp_gfx_cmd_resp *cmd) 2892 { 2893 int ret; 2894 uint64_t fw_mem_mc_addr = ucode->mc_addr; 2895 2896 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 2897 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); 2898 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); 2899 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; 2900 2901 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); 2902 if (ret) 2903 dev_err(psp->adev->dev, "Unknown firmware type\n"); 2904 2905 return ret; 2906 } 2907 2908 int psp_execute_ip_fw_load(struct psp_context *psp, 2909 struct amdgpu_firmware_info *ucode) 2910 { 2911 int ret = 0; 2912 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 2913 2914 ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd); 2915 if (!ret) { 2916 ret = psp_cmd_submit_buf(psp, ucode, cmd, 2917 psp->fence_buf_mc_addr); 2918 } 2919 2920 release_psp_cmd_buf(psp); 2921 2922 return ret; 2923 } 2924 2925 static int psp_load_p2s_table(struct psp_context *psp) 2926 { 2927 int ret; 2928 struct amdgpu_device *adev = psp->adev; 2929 struct amdgpu_firmware_info *ucode = 2930 &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE]; 2931 2932 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || 2933 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO))) 2934 return 0; 2935 2936 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || 2937 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) { 2938 uint32_t supp_vers = adev->flags & AMD_IS_APU ? 
0x0036013D : 2939 0x0036003C; 2940 if (psp->sos.fw_version < supp_vers) 2941 return 0; 2942 } 2943 2944 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2945 return 0; 2946 2947 ret = psp_execute_ip_fw_load(psp, ucode); 2948 2949 return ret; 2950 } 2951 2952 static int psp_load_smu_fw(struct psp_context *psp) 2953 { 2954 int ret; 2955 struct amdgpu_device *adev = psp->adev; 2956 struct amdgpu_firmware_info *ucode = 2957 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 2958 struct amdgpu_ras *ras = psp->ras_context.ras; 2959 2960 /* 2961 * Skip SMU FW reloading in case of using BACO for runpm only, 2962 * as SMU is always alive. 2963 */ 2964 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || 2965 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO))) 2966 return 0; 2967 2968 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2969 return 0; 2970 2971 if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled && 2972 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) || 2973 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) { 2974 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); 2975 if (ret) 2976 dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n"); 2977 } 2978 2979 ret = psp_execute_ip_fw_load(psp, ucode); 2980 2981 if (ret) 2982 dev_err(adev->dev, "PSP load smu failed!\n"); 2983 2984 return ret; 2985 } 2986 2987 static bool fw_load_skip_check(struct psp_context *psp, 2988 struct amdgpu_firmware_info *ucode) 2989 { 2990 if (!ucode->fw || !ucode->ucode_size) 2991 return true; 2992 2993 if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE) 2994 return true; 2995 2996 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 2997 (psp_smu_reload_quirk(psp) || 2998 psp->autoload_supported || 2999 psp->pmfw_centralized_cstate_management)) 3000 return true; 3001 3002 if (amdgpu_sriov_vf(psp->adev) && 3003 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id)) 3004 return true; 3005 3006 if (psp->autoload_supported && 3007 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || 3008 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) 3009 /* skip mec JT when autoload is enabled */ 3010 return true; 3011 3012 return false; 3013 } 3014 3015 int psp_load_fw_list(struct psp_context *psp, 3016 struct amdgpu_firmware_info **ucode_list, int ucode_count) 3017 { 3018 int ret = 0, i; 3019 struct amdgpu_firmware_info *ucode; 3020 3021 for (i = 0; i < ucode_count; ++i) { 3022 ucode = ucode_list[i]; 3023 psp_print_fw_hdr(psp, ucode); 3024 ret = psp_execute_ip_fw_load(psp, ucode); 3025 if (ret) 3026 return ret; 3027 } 3028 return ret; 3029 } 3030 3031 static int psp_load_non_psp_fw(struct psp_context *psp) 3032 { 3033 int i, ret; 3034 struct amdgpu_firmware_info *ucode; 3035 struct amdgpu_device *adev = psp->adev; 3036 3037 if (psp->autoload_supported && 3038 !psp->pmfw_centralized_cstate_management) { 3039 ret = psp_load_smu_fw(psp); 3040 if (ret) 3041 return ret; 3042 } 3043 3044 /* Load P2S table first if it's available */ 3045 psp_load_p2s_table(psp); 3046 3047 for (i = 0; i < adev->firmware.max_ucodes; i++) { 3048 ucode = &adev->firmware.ucode[i]; 3049 3050 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 3051 !fw_load_skip_check(psp, ucode)) { 3052 ret = psp_load_smu_fw(psp); 3053 if (ret) 3054 return ret; 3055 continue; 3056 } 3057 3058 if (fw_load_skip_check(psp, ucode)) 3059 continue; 3060 3061 if (psp->autoload_supported && 3062 (amdgpu_ip_version(adev, MP0_HWIP, 0) == 3063 IP_VERSION(11, 0, 7) || 3064 amdgpu_ip_version(adev, MP0_HWIP, 0) == 3065 IP_VERSION(11, 0, 11) || 3066 
amdgpu_ip_version(adev, MP0_HWIP, 0) == 3067 IP_VERSION(11, 0, 12)) && 3068 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 || 3069 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 || 3070 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3)) 3071 /* PSP only receives one SDMA fw for sienna_cichlid, 3072 * as all four sdma fw are the same 3073 */ 3074 continue; 3075 3076 psp_print_fw_hdr(psp, ucode); 3077 3078 ret = psp_execute_ip_fw_load(psp, ucode); 3079 if (ret) 3080 return ret; 3081 3082 /* Start rlc autoload after psp received all the gfx firmware */ 3083 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ? 3084 adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) { 3085 ret = psp_rlc_autoload_start(psp); 3086 if (ret) { 3087 dev_err(adev->dev, "Failed to start rlc autoload\n"); 3088 return ret; 3089 } 3090 } 3091 } 3092 3093 return 0; 3094 } 3095 3096 static int psp_load_fw(struct amdgpu_device *adev) 3097 { 3098 int ret; 3099 struct psp_context *psp = &adev->psp; 3100 3101 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { 3102 /* should not destroy ring, only stop */ 3103 psp_ring_stop(psp, PSP_RING_TYPE__KM); 3104 } else { 3105 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE); 3106 3107 ret = psp_ring_init(psp, PSP_RING_TYPE__KM); 3108 if (ret) { 3109 dev_err(adev->dev, "PSP ring init failed!\n"); 3110 goto failed; 3111 } 3112 } 3113 3114 ret = psp_hw_start(psp); 3115 if (ret) 3116 goto failed; 3117 3118 ret = psp_load_non_psp_fw(psp); 3119 if (ret) 3120 goto failed1; 3121 3122 ret = psp_asd_initialize(psp); 3123 if (ret) { 3124 dev_err(adev->dev, "PSP load asd failed!\n"); 3125 goto failed1; 3126 } 3127 3128 ret = psp_rl_load(adev); 3129 if (ret) { 3130 dev_err(adev->dev, "PSP load RL failed!\n"); 3131 goto failed1; 3132 } 3133 3134 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { 3135 if (adev->gmc.xgmi.num_physical_nodes > 1) { 3136 ret = psp_xgmi_initialize(psp, false, true); 3137 /* Warn on XGMI session initialization failure 3138 * instead of stopping driver initialization 3139 */ 3140 if (ret) 3141 dev_err(psp->adev->dev, 3142 "XGMI: Failed to initialize XGMI session\n"); 3143 } 3144 } 3145 3146 if (psp->ta_fw) { 3147 ret = psp_ras_initialize(psp); 3148 if (ret) 3149 dev_err(psp->adev->dev, 3150 "RAS: Failed to initialize RAS\n"); 3151 3152 ret = psp_hdcp_initialize(psp); 3153 if (ret) 3154 dev_err(psp->adev->dev, 3155 "HDCP: Failed to initialize HDCP\n"); 3156 3157 ret = psp_dtm_initialize(psp); 3158 if (ret) 3159 dev_err(psp->adev->dev, 3160 "DTM: Failed to initialize DTM\n"); 3161 3162 ret = psp_rap_initialize(psp); 3163 if (ret) 3164 dev_err(psp->adev->dev, 3165 "RAP: Failed to initialize RAP\n"); 3166 3167 ret = psp_securedisplay_initialize(psp); 3168 if (ret) 3169 dev_err(psp->adev->dev, 3170 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 3171 } 3172 3173 return 0; 3174 3175 failed1: 3176 psp_free_shared_bufs(psp); 3177 failed: 3178 /* 3179 * all cleanup jobs (xgmi terminate, ras terminate, 3180 * ring destroy, cmd/fence/fw buffers destroy, 3181 * psp->cmd destroy) are delayed to psp_hw_fini 3182 */ 3183 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 3184 return ret; 3185 } 3186 3187 static int psp_hw_init(struct amdgpu_ip_block *ip_block) 3188 { 3189 int ret; 3190 struct amdgpu_device *adev = ip_block->adev; 3191 3192 mutex_lock(&adev->firmware.mutex); 3193 3194 ret = amdgpu_ucode_init_bo(adev); 3195 if (ret) 3196 goto failed; 3197 3198 ret = psp_load_fw(adev); 3199 if (ret) { 3200 dev_err(adev->dev, "PSP firmware loading failed\n"); 3201 goto failed; 3202 } 3203
3204 mutex_unlock(&adev->firmware.mutex); 3205 return 0; 3206 3207 failed: 3208 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT; 3209 mutex_unlock(&adev->firmware.mutex); 3210 return -EINVAL; 3211 } 3212 3213 static int psp_hw_fini(struct amdgpu_ip_block *ip_block) 3214 { 3215 struct amdgpu_device *adev = ip_block->adev; 3216 struct psp_context *psp = &adev->psp; 3217 3218 if (psp->ta_fw) { 3219 psp_ras_terminate(psp); 3220 psp_securedisplay_terminate(psp); 3221 psp_rap_terminate(psp); 3222 psp_dtm_terminate(psp); 3223 psp_hdcp_terminate(psp); 3224 3225 if (adev->gmc.xgmi.num_physical_nodes > 1) 3226 psp_xgmi_terminate(psp); 3227 } 3228 3229 psp_asd_terminate(psp); 3230 psp_tmr_terminate(psp); 3231 3232 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 3233 3234 return 0; 3235 } 3236 3237 static int psp_suspend(struct amdgpu_ip_block *ip_block) 3238 { 3239 int ret = 0; 3240 struct amdgpu_device *adev = ip_block->adev; 3241 struct psp_context *psp = &adev->psp; 3242 3243 if (adev->gmc.xgmi.num_physical_nodes > 1 && 3244 psp->xgmi_context.context.initialized) { 3245 ret = psp_xgmi_terminate(psp); 3246 if (ret) { 3247 dev_err(adev->dev, "Failed to terminate xgmi ta\n"); 3248 goto out; 3249 } 3250 } 3251 3252 if (psp->ta_fw) { 3253 ret = psp_ras_terminate(psp); 3254 if (ret) { 3255 dev_err(adev->dev, "Failed to terminate ras ta\n"); 3256 goto out; 3257 } 3258 ret = psp_hdcp_terminate(psp); 3259 if (ret) { 3260 dev_err(adev->dev, "Failed to terminate hdcp ta\n"); 3261 goto out; 3262 } 3263 ret = psp_dtm_terminate(psp); 3264 if (ret) { 3265 dev_err(adev->dev, "Failed to terminate dtm ta\n"); 3266 goto out; 3267 } 3268 ret = psp_rap_terminate(psp); 3269 if (ret) { 3270 dev_err(adev->dev, "Failed to terminate rap ta\n"); 3271 goto out; 3272 } 3273 ret = psp_securedisplay_terminate(psp); 3274 if (ret) { 3275 dev_err(adev->dev, "Failed to terminate securedisplay ta\n"); 3276 goto out; 3277 } 3278 } 3279 3280 ret = psp_asd_terminate(psp); 3281 if (ret) { 3282 dev_err(adev->dev, "Failed to terminate asd\n"); 3283 goto out; 3284 } 3285 3286 ret = psp_tmr_terminate(psp); 3287 if (ret) { 3288 dev_err(adev->dev, "Failed to terminate tmr\n"); 3289 goto out; 3290 } 3291 3292 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); 3293 if (ret) 3294 dev_err(adev->dev, "PSP ring stop failed\n"); 3295 3296 out: 3297 return ret; 3298 } 3299 3300 static int psp_resume(struct amdgpu_ip_block *ip_block) 3301 { 3302 int ret; 3303 struct amdgpu_device *adev = ip_block->adev; 3304 struct psp_context *psp = &adev->psp; 3305 3306 dev_info(adev->dev, "PSP is resuming...\n"); 3307 3308 if (psp->mem_train_ctx.enable_mem_training) { 3309 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); 3310 if (ret) { 3311 dev_err(adev->dev, "Failed to process memory training!\n"); 3312 return ret; 3313 } 3314 } 3315 3316 mutex_lock(&adev->firmware.mutex); 3317 3318 ret = amdgpu_ucode_init_bo(adev); 3319 if (ret) 3320 goto failed; 3321 3322 ret = psp_hw_start(psp); 3323 if (ret) 3324 goto failed; 3325 3326 ret = psp_load_non_psp_fw(psp); 3327 if (ret) 3328 goto failed; 3329 3330 ret = psp_asd_initialize(psp); 3331 if (ret) { 3332 dev_err(adev->dev, "PSP load asd failed!\n"); 3333 goto failed; 3334 } 3335 3336 ret = psp_rl_load(adev); 3337 if (ret) { 3338 dev_err(adev->dev, "PSP load RL failed!\n"); 3339 goto failed; 3340 } 3341 3342 if (adev->gmc.xgmi.num_physical_nodes > 1) { 3343 ret = psp_xgmi_initialize(psp, false, true); 3344 /* Warn on XGMI session initialization failure 3345 * instead of stopping driver initialization 3346 */ 3347 if (ret) 3348
dev_err(psp->adev->dev, 3349 "XGMI: Failed to initialize XGMI session\n"); 3350 } 3351 3352 if (psp->ta_fw) { 3353 ret = psp_ras_initialize(psp); 3354 if (ret) 3355 dev_err(psp->adev->dev, 3356 "RAS: Failed to initialize RAS\n"); 3357 3358 ret = psp_hdcp_initialize(psp); 3359 if (ret) 3360 dev_err(psp->adev->dev, 3361 "HDCP: Failed to initialize HDCP\n"); 3362 3363 ret = psp_dtm_initialize(psp); 3364 if (ret) 3365 dev_err(psp->adev->dev, 3366 "DTM: Failed to initialize DTM\n"); 3367 3368 ret = psp_rap_initialize(psp); 3369 if (ret) 3370 dev_err(psp->adev->dev, 3371 "RAP: Failed to initialize RAP\n"); 3372 3373 ret = psp_securedisplay_initialize(psp); 3374 if (ret) 3375 dev_err(psp->adev->dev, 3376 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 3377 } 3378 3379 mutex_unlock(&adev->firmware.mutex); 3380 3381 return 0; 3382 3383 failed: 3384 dev_err(adev->dev, "PSP resume failed\n"); 3385 mutex_unlock(&adev->firmware.mutex); 3386 return ret; 3387 } 3388 3389 int psp_gpu_reset(struct amdgpu_device *adev) 3390 { 3391 int ret; 3392 3393 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 3394 return 0; 3395 3396 mutex_lock(&adev->psp.mutex); 3397 ret = psp_mode1_reset(&adev->psp); 3398 mutex_unlock(&adev->psp.mutex); 3399 3400 return ret; 3401 } 3402 3403 int psp_rlc_autoload_start(struct psp_context *psp) 3404 { 3405 int ret; 3406 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 3407 3408 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC; 3409 3410 ret = psp_cmd_submit_buf(psp, NULL, cmd, 3411 psp->fence_buf_mc_addr); 3412 3413 release_psp_cmd_buf(psp); 3414 3415 return ret; 3416 } 3417 3418 int psp_ring_cmd_submit(struct psp_context *psp, 3419 uint64_t cmd_buf_mc_addr, 3420 uint64_t fence_mc_addr, 3421 int index) 3422 { 3423 unsigned int psp_write_ptr_reg = 0; 3424 struct psp_gfx_rb_frame *write_frame; 3425 struct psp_ring *ring = &psp->km_ring; 3426 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; 3427 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + 3428 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; 3429 struct amdgpu_device *adev = psp->adev; 3430 uint32_t ring_size_dw = ring->ring_size / 4; 3431 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; 3432 3433 /* KM (GPCOM) prepare write pointer */ 3434 psp_write_ptr_reg = psp_ring_get_wptr(psp); 3435 3436 /* Update KM RB frame pointer to new frame */ 3437 /* write_frame ptr increments by size of rb_frame in bytes */ 3438 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ 3439 if ((psp_write_ptr_reg % ring_size_dw) == 0) 3440 write_frame = ring_buffer_start; 3441 else 3442 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); 3443 /* Check invalid write_frame ptr address */ 3444 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { 3445 dev_err(adev->dev, 3446 "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", 3447 ring_buffer_start, ring_buffer_end, write_frame); 3448 dev_err(adev->dev, 3449 "write_frame is pointing to address out of bounds\n"); 3450 return -EINVAL; 3451 } 3452 3453 /* Initialize KM RB frame */ 3454 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); 3455 3456 /* Update KM RB frame */ 3457 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); 3458 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); 3459 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); 3460 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); 3461 write_frame->fence_value = index; 3462 
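/* Flush the HDP write cache so the RB frame written above reaches memory
 * before the write pointer update below hands it to the PSP.
 */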
amdgpu_device_flush_hdp(adev, NULL); 3463 3464 /* Update the write Pointer in DWORDs */ 3465 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; 3466 psp_ring_set_wptr(psp, psp_write_ptr_reg); 3467 return 0; 3468 } 3469 3470 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name) 3471 { 3472 struct amdgpu_device *adev = psp->adev; 3473 const struct psp_firmware_header_v1_0 *asd_hdr; 3474 int err = 0; 3475 3476 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED, 3477 "amdgpu/%s_asd.bin", chip_name); 3478 if (err) 3479 goto out; 3480 3481 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; 3482 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version); 3483 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version); 3484 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes); 3485 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr + 3486 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); 3487 return 0; 3488 out: 3489 amdgpu_ucode_release(&adev->psp.asd_fw); 3490 return err; 3491 } 3492 3493 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name) 3494 { 3495 struct amdgpu_device *adev = psp->adev; 3496 const struct psp_firmware_header_v1_0 *toc_hdr; 3497 int err = 0; 3498 3499 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED, 3500 "amdgpu/%s_toc.bin", chip_name); 3501 if (err) 3502 goto out; 3503 3504 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; 3505 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); 3506 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); 3507 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); 3508 adev->psp.toc.start_addr = (uint8_t *)toc_hdr + 3509 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); 3510 return 0; 3511 out: 3512 amdgpu_ucode_release(&adev->psp.toc_fw); 3513 return err; 3514 } 3515 3516 static int parse_sos_bin_descriptor(struct psp_context *psp, 3517 const struct psp_fw_bin_desc *desc, 3518 const struct psp_firmware_header_v2_0 *sos_hdr) 3519 { 3520 uint8_t *ucode_start_addr = NULL; 3521 3522 if (!psp || !desc || !sos_hdr) 3523 return -EINVAL; 3524 3525 ucode_start_addr = (uint8_t *)sos_hdr + 3526 le32_to_cpu(desc->offset_bytes) + 3527 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3528 3529 switch (desc->fw_type) { 3530 case PSP_FW_TYPE_PSP_SOS: 3531 psp->sos.fw_version = le32_to_cpu(desc->fw_version); 3532 psp->sos.feature_version = le32_to_cpu(desc->fw_version); 3533 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes); 3534 psp->sos.start_addr = ucode_start_addr; 3535 break; 3536 case PSP_FW_TYPE_PSP_SYS_DRV: 3537 psp->sys.fw_version = le32_to_cpu(desc->fw_version); 3538 psp->sys.feature_version = le32_to_cpu(desc->fw_version); 3539 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes); 3540 psp->sys.start_addr = ucode_start_addr; 3541 break; 3542 case PSP_FW_TYPE_PSP_KDB: 3543 psp->kdb.fw_version = le32_to_cpu(desc->fw_version); 3544 psp->kdb.feature_version = le32_to_cpu(desc->fw_version); 3545 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes); 3546 psp->kdb.start_addr = ucode_start_addr; 3547 break; 3548 case PSP_FW_TYPE_PSP_TOC: 3549 psp->toc.fw_version = le32_to_cpu(desc->fw_version); 3550 psp->toc.feature_version = le32_to_cpu(desc->fw_version); 3551 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes); 3552 
psp->toc.start_addr = ucode_start_addr; 3553 break; 3554 case PSP_FW_TYPE_PSP_SPL: 3555 psp->spl.fw_version = le32_to_cpu(desc->fw_version); 3556 psp->spl.feature_version = le32_to_cpu(desc->fw_version); 3557 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes); 3558 psp->spl.start_addr = ucode_start_addr; 3559 break; 3560 case PSP_FW_TYPE_PSP_RL: 3561 psp->rl.fw_version = le32_to_cpu(desc->fw_version); 3562 psp->rl.feature_version = le32_to_cpu(desc->fw_version); 3563 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes); 3564 psp->rl.start_addr = ucode_start_addr; 3565 break; 3566 case PSP_FW_TYPE_PSP_SOC_DRV: 3567 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version); 3568 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version); 3569 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3570 psp->soc_drv.start_addr = ucode_start_addr; 3571 break; 3572 case PSP_FW_TYPE_PSP_INTF_DRV: 3573 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version); 3574 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version); 3575 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3576 psp->intf_drv.start_addr = ucode_start_addr; 3577 break; 3578 case PSP_FW_TYPE_PSP_DBG_DRV: 3579 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version); 3580 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version); 3581 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3582 psp->dbg_drv.start_addr = ucode_start_addr; 3583 break; 3584 case PSP_FW_TYPE_PSP_RAS_DRV: 3585 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version); 3586 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version); 3587 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3588 psp->ras_drv.start_addr = ucode_start_addr; 3589 break; 3590 case PSP_FW_TYPE_PSP_IPKEYMGR_DRV: 3591 psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version); 3592 psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version); 3593 psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3594 psp->ipkeymgr_drv.start_addr = ucode_start_addr; 3595 break; 3596 case PSP_FW_TYPE_PSP_SPDM_DRV: 3597 psp->spdm_drv.fw_version = le32_to_cpu(desc->fw_version); 3598 psp->spdm_drv.feature_version = le32_to_cpu(desc->fw_version); 3599 psp->spdm_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3600 psp->spdm_drv.start_addr = ucode_start_addr; 3601 break; 3602 default: 3603 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type); 3604 break; 3605 } 3606 3607 return 0; 3608 } 3609 3610 static int psp_init_sos_base_fw(struct amdgpu_device *adev) 3611 { 3612 const struct psp_firmware_header_v1_0 *sos_hdr; 3613 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3614 uint8_t *ucode_array_start_addr; 3615 3616 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3617 ucode_array_start_addr = (uint8_t *)sos_hdr + 3618 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3619 3620 if (adev->gmc.xgmi.connected_to_cpu || 3621 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) { 3622 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version); 3623 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version); 3624 3625 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes); 3626 adev->psp.sys.start_addr = ucode_array_start_addr; 3627 3628 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes); 3629 adev->psp.sos.start_addr = ucode_array_start_addr + 3630 le32_to_cpu(sos_hdr->sos.offset_bytes); 3631 } else { 3632 /* Load alternate PSP SOS FW */ 3633 sos_hdr_v1_3 = 
(const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3634 3635 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3636 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3637 3638 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes); 3639 adev->psp.sys.start_addr = ucode_array_start_addr + 3640 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes); 3641 3642 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes); 3643 adev->psp.sos.start_addr = ucode_array_start_addr + 3644 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes); 3645 } 3646 3647 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) { 3648 dev_warn(adev->dev, "PSP SOS FW not available"); 3649 return -EINVAL; 3650 } 3651 3652 return 0; 3653 } 3654 3655 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) 3656 { 3657 struct amdgpu_device *adev = psp->adev; 3658 const struct psp_firmware_header_v1_0 *sos_hdr; 3659 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1; 3660 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; 3661 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3662 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0; 3663 const struct psp_firmware_header_v2_1 *sos_hdr_v2_1; 3664 int fw_index, fw_bin_count, start_index = 0; 3665 const struct psp_fw_bin_desc *fw_bin; 3666 uint8_t *ucode_array_start_addr; 3667 int err = 0; 3668 3669 if (amdgpu_is_kicker_fw(adev)) 3670 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED, 3671 "amdgpu/%s_sos_kicker.bin", chip_name); 3672 else 3673 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED, 3674 "amdgpu/%s_sos.bin", chip_name); 3675 if (err) 3676 goto out; 3677 3678 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3679 ucode_array_start_addr = (uint8_t *)sos_hdr + 3680 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3681 amdgpu_ucode_print_psp_hdr(&sos_hdr->header); 3682 3683 switch (sos_hdr->header.header_version_major) { 3684 case 1: 3685 err = psp_init_sos_base_fw(adev); 3686 if (err) 3687 goto out; 3688 3689 if (sos_hdr->header.header_version_minor == 1) { 3690 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; 3691 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes); 3692 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3693 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes); 3694 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes); 3695 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3696 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes); 3697 } 3698 if (sos_hdr->header.header_version_minor == 2) { 3699 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; 3700 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes); 3701 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3702 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes); 3703 } 3704 if (sos_hdr->header.header_version_minor == 3) { 3705 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3706 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes); 3707 adev->psp.toc.start_addr = ucode_array_start_addr + 3708 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes); 3709 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes); 3710 adev->psp.kdb.start_addr = ucode_array_start_addr + 3711 
le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes); 3712 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes); 3713 adev->psp.spl.start_addr = ucode_array_start_addr + 3714 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes); 3715 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes); 3716 adev->psp.rl.start_addr = ucode_array_start_addr + 3717 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes); 3718 } 3719 break; 3720 case 2: 3721 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data; 3722 3723 fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); 3724 3725 if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) { 3726 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n"); 3727 err = -EINVAL; 3728 goto out; 3729 } 3730 3731 if (sos_hdr_v2_0->header.header_version_minor == 1) { 3732 sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data; 3733 3734 fw_bin = sos_hdr_v2_1->psp_fw_bin; 3735 3736 if (psp_is_aux_sos_load_required(psp)) 3737 start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index); 3738 else 3739 fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index); 3740 3741 } else { 3742 fw_bin = sos_hdr_v2_0->psp_fw_bin; 3743 } 3744 3745 for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) { 3746 err = parse_sos_bin_descriptor(psp, fw_bin + fw_index, 3747 sos_hdr_v2_0); 3748 if (err) 3749 goto out; 3750 } 3751 break; 3752 default: 3753 dev_err(adev->dev, 3754 "unsupported psp sos firmware\n"); 3755 err = -EINVAL; 3756 goto out; 3757 } 3758 3759 return 0; 3760 out: 3761 amdgpu_ucode_release(&adev->psp.sos_fw); 3762 3763 return err; 3764 } 3765 3766 static bool is_ta_fw_applicable(struct psp_context *psp, 3767 const struct psp_fw_bin_desc *desc) 3768 { 3769 struct amdgpu_device *adev = psp->adev; 3770 uint32_t fw_version; 3771 3772 switch (desc->fw_type) { 3773 case TA_FW_TYPE_PSP_XGMI: 3774 case TA_FW_TYPE_PSP_XGMI_AUX: 3775 /* for now, AUX TA only exists on 13.0.6 ta bin, 3776 * from v20.00.0x.14 3777 */ 3778 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == 3779 IP_VERSION(13, 0, 6)) { 3780 fw_version = le32_to_cpu(desc->fw_version); 3781 3782 if (adev->flags & AMD_IS_APU && 3783 (fw_version & 0xff) >= 0x14) 3784 return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX; 3785 else 3786 return desc->fw_type == TA_FW_TYPE_PSP_XGMI; 3787 } 3788 break; 3789 default: 3790 break; 3791 } 3792 3793 return true; 3794 } 3795 3796 static int parse_ta_bin_descriptor(struct psp_context *psp, 3797 const struct psp_fw_bin_desc *desc, 3798 const struct ta_firmware_header_v2_0 *ta_hdr) 3799 { 3800 uint8_t *ucode_start_addr = NULL; 3801 3802 if (!psp || !desc || !ta_hdr) 3803 return -EINVAL; 3804 3805 if (!is_ta_fw_applicable(psp, desc)) 3806 return 0; 3807 3808 ucode_start_addr = (uint8_t *)ta_hdr + 3809 le32_to_cpu(desc->offset_bytes) + 3810 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3811 3812 switch (desc->fw_type) { 3813 case TA_FW_TYPE_PSP_ASD: 3814 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3815 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version); 3816 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3817 psp->asd_context.bin_desc.start_addr = ucode_start_addr; 3818 break; 3819 case TA_FW_TYPE_PSP_XGMI: 3820 case TA_FW_TYPE_PSP_XGMI_AUX: 3821 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3822 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3823 
		psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_RAS:
		psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
		psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
		psp->ras_context.context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_HDCP:
		psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
		psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
		psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_DTM:
		psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
		psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
		psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_RAP:
		psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
		psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
		psp->rap_context.context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_SECUREDISPLAY:
		psp->securedisplay_context.context.bin_desc.fw_version =
			le32_to_cpu(desc->fw_version);
		psp->securedisplay_context.context.bin_desc.size_bytes =
			le32_to_cpu(desc->size_bytes);
		psp->securedisplay_context.context.bin_desc.start_addr =
			ucode_start_addr;
		break;
	default:
		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
		break;
	}

	return 0;
}

static int parse_ta_v1_microcode(struct psp_context *psp)
{
	const struct ta_firmware_header_v1_0 *ta_hdr;
	struct amdgpu_device *adev = psp->adev;

	ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;

	if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
		return -EINVAL;

	adev->psp.xgmi_context.context.bin_desc.fw_version =
		le32_to_cpu(ta_hdr->xgmi.fw_version);
	adev->psp.xgmi_context.context.bin_desc.size_bytes =
		le32_to_cpu(ta_hdr->xgmi.size_bytes);
	adev->psp.xgmi_context.context.bin_desc.start_addr =
		(uint8_t *)ta_hdr +
		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);

	adev->psp.ras_context.context.bin_desc.fw_version =
		le32_to_cpu(ta_hdr->ras.fw_version);
	adev->psp.ras_context.context.bin_desc.size_bytes =
		le32_to_cpu(ta_hdr->ras.size_bytes);
	adev->psp.ras_context.context.bin_desc.start_addr =
		(uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
		le32_to_cpu(ta_hdr->ras.offset_bytes);

	adev->psp.hdcp_context.context.bin_desc.fw_version =
		le32_to_cpu(ta_hdr->hdcp.fw_version);
	adev->psp.hdcp_context.context.bin_desc.size_bytes =
		le32_to_cpu(ta_hdr->hdcp.size_bytes);
	adev->psp.hdcp_context.context.bin_desc.start_addr =
		(uint8_t *)ta_hdr +
		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);

	adev->psp.dtm_context.context.bin_desc.fw_version =
		le32_to_cpu(ta_hdr->dtm.fw_version);
	adev->psp.dtm_context.context.bin_desc.size_bytes =
		le32_to_cpu(ta_hdr->dtm.size_bytes);
	adev->psp.dtm_context.context.bin_desc.start_addr =
		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
		le32_to_cpu(ta_hdr->dtm.offset_bytes);

	adev->psp.securedisplay_context.context.bin_desc.fw_version =
		le32_to_cpu(ta_hdr->securedisplay.fw_version);
	adev->psp.securedisplay_context.context.bin_desc.size_bytes =
		le32_to_cpu(ta_hdr->securedisplay.size_bytes);
	adev->psp.securedisplay_context.context.bin_desc.start_addr =
		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
		le32_to_cpu(ta_hdr->securedisplay.offset_bytes);

	adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);

	return 0;
}

static int parse_ta_v2_microcode(struct psp_context *psp)
{
	const struct ta_firmware_header_v2_0 *ta_hdr;
	struct amdgpu_device *adev = psp->adev;
	int err = 0;
	int ta_index = 0;

	ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;

	if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
		return -EINVAL;

	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
		return -EINVAL;
	}

	for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
		err = parse_ta_bin_descriptor(psp,
					      &ta_hdr->ta_fw_bin[ta_index],
					      ta_hdr);
		if (err)
			return err;
	}

	return 0;
}

int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
{
	const struct common_firmware_header *hdr;
	struct amdgpu_device *adev = psp->adev;
	int err;

	if (amdgpu_is_kicker_fw(adev))
		err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s_ta_kicker.bin", chip_name);
	else
		err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s_ta.bin", chip_name);
	if (err)
		return err;

	hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
	switch (le16_to_cpu(hdr->header_version_major)) {
	case 1:
		err = parse_ta_v1_microcode(psp);
		break;
	case 2:
		err = parse_ta_v2_microcode(psp);
		break;
	default:
		dev_err(adev->dev, "unsupported TA header version\n");
		err = -EINVAL;
	}

	if (err)
		amdgpu_ucode_release(&adev->psp.ta_fw);

	return err;
}

int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
{
	struct amdgpu_device *adev = psp->adev;
	const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
	struct amdgpu_firmware_info *info = NULL;
	int err = 0;

	if (!amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
		return -EINVAL;
	}

	err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL,
				   "amdgpu/%s_cap.bin", chip_name);
	if (err) {
		if (err == -ENODEV) {
			dev_warn(adev->dev, "cap microcode does not exist, skip\n");
			err = 0;
		} else {
			dev_err(adev->dev, "failed to initialize cap microcode\n");
		}
		goto out;
	}

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
	info->ucode_id = AMDGPU_UCODE_ID_CAP;
	info->fw = adev->psp.cap_fw;
	cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
		adev->psp.cap_fw->data;
	adev->firmware.fw_size += ALIGN(
		le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
	adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
	adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
	adev->psp.cap_ucode_size =
		le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);

	return 0;

out:
	amdgpu_ucode_release(&adev->psp.cap_fw);
	return err;
}

int psp_config_sq_perfmon(struct psp_context *psp,
			  uint32_t xcp_id, bool core_override_enable,
			  bool reg_override_enable, bool perfmon_override_enable)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (xcp_id > MAX_XCP) {
		dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id);
		return -EINVAL;
	}

	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) {
		dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n",
			amdgpu_ip_version(psp->adev, MP0_HWIP, 0));
		return -EINVAL;
	}

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON;
	cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id);
	cmd->cmd.config_sq_perfmon.core_override = core_override_enable;
	cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable;
	cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (ret)
		dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n",
			 xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable);

	release_psp_cmd_buf(psp);
	return ret;
}

static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block,
				     enum amd_powergating_state state)
{
	return 0;
}

static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_ip_block *ip_block;
	uint32_t fw_ver;
	int ret;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
	if (!ip_block || !ip_block->status.late_initialized) {
		dev_info(adev->dev, "PSP block is not ready yet.\n");
		return -EBUSY;
	}

	mutex_lock(&adev->psp.mutex);
	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
	mutex_unlock(&adev->psp.mutex);

	if (ret) {
		dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
		return ret;
	}

	return sysfs_emit(buf, "%x\n", fw_ver);
}

static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret, idx;
	const struct firmware *usbc_pd_fw;
	struct amdgpu_bo *fw_buf_bo = NULL;
	uint64_t fw_pri_mc_addr;
	void *fw_pri_cpu_addr;
	struct amdgpu_ip_block *ip_block;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
	if (!ip_block || !ip_block->status.late_initialized) {
		dev_err(adev->dev, "PSP block is not ready yet.\n");
		return -EBUSY;
	}

	if (!drm_dev_enter(ddev, &idx))
		return -ENODEV;

	ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s", buf);
	if (ret)
		goto fail;

	/* LFB address which is aligned to 1MB boundary per PSP request */
	ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &fw_buf_bo, &fw_pri_mc_addr,
				      &fw_pri_cpu_addr);
	if (ret)
		goto rel_buf;

	memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);

	mutex_lock(&adev->psp.mutex);
	ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
	mutex_unlock(&adev->psp.mutex);

	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);

rel_buf:
	amdgpu_ucode_release(&usbc_pd_fw);
fail:
	if (ret) {
		dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret);
		count = ret;
	}

	drm_dev_exit(idx);
	return count;
}

void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
{
	int idx;

	if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
		return;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, start_addr, bin_size);

	drm_dev_exit(idx);
}

/**
 * DOC: usbc_pd_fw
 * Reading from this file will retrieve the USB-C PD firmware version. Writing to
 * this file will trigger the update process.
 */
static DEVICE_ATTR(usbc_pd_fw, 0644,
		   psp_usbc_pd_fw_sysfs_read,
		   psp_usbc_pd_fw_sysfs_write);
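/*
 * Illustrative usage only, not part of the driver: a minimal userspace sketch
 * that queries the PD firmware version through the attribute above. The sysfs
 * path and card index below are assumptions and vary by system; writing a
 * firmware file name (looked up under the amdgpu/ firmware directory, e.g.
 * /lib/firmware/amdgpu) to the same attribute starts an update.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/class/drm/card0/device/usbc_pd_fw", "r");
 *		unsigned int ver = 0;
 *
 *		if (!f)
 *			return 1;
 *		if (fscanf(f, "%x", &ver) == 1)
 *			printf("USB-C PD FW version: 0x%x\n", ver);
 *		fclose(f);
 *		return 0;
 *	}
 */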
int is_psp_fw_valid(struct psp_bin_desc bin)
{
	return bin.size_bytes;
}

static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
					const struct bin_attribute *bin_attr,
					char *buffer, loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	adev->psp.vbflash_done = false;

	/* Safeguard against memory drain */
	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
		dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
		kvfree(adev->psp.vbflash_tmp_buf);
		adev->psp.vbflash_tmp_buf = NULL;
		adev->psp.vbflash_image_size = 0;
		return -ENOMEM;
	}

	/* TODO Just allocate max for now and optimize to realloc later if needed */
	if (!adev->psp.vbflash_tmp_buf) {
		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
		if (!adev->psp.vbflash_tmp_buf)
			return -ENOMEM;
	}

	mutex_lock(&adev->psp.mutex);
	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
	adev->psp.vbflash_image_size += count;
	mutex_unlock(&adev->psp.mutex);

	dev_dbg(adev->dev, "IFWI staged for update\n");

	return count;
}

static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
				       const struct bin_attribute *bin_attr, char *buffer,
				       loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_bo *fw_buf_bo = NULL;
	uint64_t fw_pri_mc_addr;
	void *fw_pri_cpu_addr;
	int ret;

	if (adev->psp.vbflash_image_size == 0)
		return -EINVAL;

	dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");

	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &fw_buf_bo,
				      &fw_pri_mc_addr,
				      &fw_pri_cpu_addr);
	if (ret)
		goto rel_buf;

	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);

	mutex_lock(&adev->psp.mutex);
	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
	mutex_unlock(&adev->psp.mutex);

	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);

rel_buf:
	kvfree(adev->psp.vbflash_tmp_buf);
	adev->psp.vbflash_tmp_buf = NULL;
	adev->psp.vbflash_image_size = 0;

	if (ret) {
		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
		return ret;
	}

	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
	return 0;
}

/**
 * DOC: psp_vbflash
 * Writing to this file will stage an IFWI for update. Reading from this file
 * will trigger the update process.
 */
static const struct bin_attribute psp_vbflash_bin_attr = {
	.attr = {.name = "psp_vbflash", .mode = 0660},
	.size = 0,
	.write = amdgpu_psp_vbflash_write,
	.read = amdgpu_psp_vbflash_read,
};

/**
 * DOC: psp_vbflash_status
 * The status of the flash process.
 * 0: IFWI flash not complete.
 * 1: IFWI flash complete.
 */
static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t vbflash_status;

	vbflash_status = psp_vbflash_status(&adev->psp);
	if (!adev->psp.vbflash_done)
		vbflash_status = 0;
	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
		vbflash_status = 1;

	return sysfs_emit(buf, "0x%x\n", vbflash_status);
}
static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
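/*
 * Illustrative usage only, not part of the driver: per the DOC blocks above,
 * the expected flow is to stage the IFWI image by writing it to psp_vbflash,
 * trigger the flash by reading psp_vbflash, and then poll psp_vbflash_status.
 * A minimal userspace sketch, assuming the attributes are exposed under
 * /sys/class/drm/card0/device (path varies by system):
 *
 *	#include <stdio.h>
 *
 *	#define DEV "/sys/class/drm/card0/device/"
 *
 *	static int stage_image(const char *image_path)
 *	{
 *		FILE *in = fopen(image_path, "rb");
 *		FILE *out = fopen(DEV "psp_vbflash", "wb");
 *		char chunk[4096];
 *		size_t n;
 *		int ret = -1;
 *
 *		if (in && out) {
 *			while ((n = fread(chunk, 1, sizeof(chunk), in)) > 0)
 *				fwrite(chunk, 1, n, out);
 *			ret = 0;
 *		}
 *		if (in)
 *			fclose(in);
 *		if (out)
 *			fclose(out);
 *		return ret;
 *	}
 *
 *	int main(void)
 *	{
 *		char status[16] = "";
 *		FILE *f;
 *
 *		if (stage_image("ifwi.bin"))
 *			return 1;
 *
 *		f = fopen(DEV "psp_vbflash", "rb");
 *		if (f) {
 *			fgetc(f);	// any read triggers the flash
 *			fclose(f);
 *		}
 *
 *		f = fopen(DEV "psp_vbflash_status", "r");
 *		if (f && fgets(status, sizeof(status), f))
 *			printf("flash status: %s", status);
 *		if (f)
 *			fclose(f);
 *		return 0;
 *	}
 */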
static const struct bin_attribute *const bin_flash_attrs[] = {
	&psp_vbflash_bin_attr,
	NULL
};

static struct attribute *flash_attrs[] = {
	&dev_attr_psp_vbflash_status.attr,
	&dev_attr_usbc_pd_fw.attr,
	NULL
};

static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (attr == &dev_attr_usbc_pd_fw.attr)
		return adev->psp.sup_pd_fw_up ? 0660 : 0;

	return adev->psp.sup_ifwi_up ? 0440 : 0;
}

static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
						const struct bin_attribute *attr,
						int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return adev->psp.sup_ifwi_up ? 0660 : 0;
}

const struct attribute_group amdgpu_flash_attr_group = {
	.attrs = flash_attrs,
	.bin_attrs = bin_flash_attrs,
	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
	.is_visible = amdgpu_flash_attr_is_visible,
};

#if defined(CONFIG_DEBUG_FS)
static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct spirom_bo *bo_triplet;
	int ret;

	/* serialize open() calls */
	if (!mutex_trylock(&adev->psp.mutex))
		return -EBUSY;

	/*
	 * Make sure only one userspace process is alive for dumping, so that
	 * only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE_B * 2 is consumed,
	 * e.g. when one process tries to open the file while another has
	 * already proceeded to read or release. Serializing here also removes
	 * the need for locking in the read() and release() callbacks.
	 */
	if (adev->psp.spirom_dump_trip) {
		mutex_unlock(&adev->psp.mutex);
		return -EBUSY;
	}

	bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
	if (!bo_triplet) {
		mutex_unlock(&adev->psp.mutex);
		return -ENOMEM;
	}

	ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &bo_triplet->bo,
				      &bo_triplet->mc_addr,
				      &bo_triplet->cpu_addr);
	if (ret)
		goto rel_trip;

	ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
	if (ret)
		goto rel_bo;

	adev->psp.spirom_dump_trip = bo_triplet;
	mutex_unlock(&adev->psp.mutex);
	return 0;
rel_bo:
	amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
			      &bo_triplet->cpu_addr);
rel_trip:
	kfree(bo_triplet);
	mutex_unlock(&adev->psp.mutex);
	dev_err(adev->dev, "IFWI dump failed, err = %d\n", ret);
	return ret;
}

static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
					    loff_t *pos)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;

	if (!bo_triplet)
		return -EINVAL;

	return simple_read_from_buffer(buf, size, pos, bo_triplet->cpu_addr,
				       AMD_VBIOS_FILE_MAX_SIZE_B * 2);
}

static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;

	if (bo_triplet) {
		amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
				      &bo_triplet->cpu_addr);
		kfree(bo_triplet);
	}

	adev->psp.spirom_dump_trip = NULL;
	return 0;
}

static const struct file_operations psp_dump_spirom_debugfs_ops = {
	.owner = THIS_MODULE,
	.open = psp_read_spirom_debugfs_open,
	.read = psp_read_spirom_debugfs_read,
	.release = psp_read_spirom_debugfs_release,
	.llseek = default_llseek,
};
#endif

void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;

	debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
				 adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
#endif
}
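/*
 * Illustrative usage only, not part of the driver: the SPIROM dump exposed
 * above is a fixed-size blob of AMD_VBIOS_FILE_MAX_SIZE_B * 2 bytes. A minimal
 * userspace sketch, assuming debugfs is mounted at /sys/kernel/debug and the
 * DRM minor is 0 (both vary by system):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *in = fopen("/sys/kernel/debug/dri/0/psp_spirom_dump", "rb");
 *		FILE *out = fopen("spirom_dump.bin", "wb");
 *		char chunk[4096];
 *		size_t n;
 *
 *		if (!in || !out)
 *			return 1;
 *		while ((n = fread(chunk, 1, sizeof(chunk), in)) > 0)
 *			fwrite(chunk, 1, n, out);
 *		fclose(in);
 *		fclose(out);
 *		return 0;
 *	}
 */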
"psp", 4447 .early_init = psp_early_init, 4448 .sw_init = psp_sw_init, 4449 .sw_fini = psp_sw_fini, 4450 .hw_init = psp_hw_init, 4451 .hw_fini = psp_hw_fini, 4452 .suspend = psp_suspend, 4453 .resume = psp_resume, 4454 .set_clockgating_state = psp_set_clockgating_state, 4455 .set_powergating_state = psp_set_powergating_state, 4456 }; 4457 4458 const struct amdgpu_ip_block_version psp_v3_1_ip_block = { 4459 .type = AMD_IP_BLOCK_TYPE_PSP, 4460 .major = 3, 4461 .minor = 1, 4462 .rev = 0, 4463 .funcs = &psp_ip_funcs, 4464 }; 4465 4466 const struct amdgpu_ip_block_version psp_v10_0_ip_block = { 4467 .type = AMD_IP_BLOCK_TYPE_PSP, 4468 .major = 10, 4469 .minor = 0, 4470 .rev = 0, 4471 .funcs = &psp_ip_funcs, 4472 }; 4473 4474 const struct amdgpu_ip_block_version psp_v11_0_ip_block = { 4475 .type = AMD_IP_BLOCK_TYPE_PSP, 4476 .major = 11, 4477 .minor = 0, 4478 .rev = 0, 4479 .funcs = &psp_ip_funcs, 4480 }; 4481 4482 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = { 4483 .type = AMD_IP_BLOCK_TYPE_PSP, 4484 .major = 11, 4485 .minor = 0, 4486 .rev = 8, 4487 .funcs = &psp_ip_funcs, 4488 }; 4489 4490 const struct amdgpu_ip_block_version psp_v12_0_ip_block = { 4491 .type = AMD_IP_BLOCK_TYPE_PSP, 4492 .major = 12, 4493 .minor = 0, 4494 .rev = 0, 4495 .funcs = &psp_ip_funcs, 4496 }; 4497 4498 const struct amdgpu_ip_block_version psp_v13_0_ip_block = { 4499 .type = AMD_IP_BLOCK_TYPE_PSP, 4500 .major = 13, 4501 .minor = 0, 4502 .rev = 0, 4503 .funcs = &psp_ip_funcs, 4504 }; 4505 4506 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = { 4507 .type = AMD_IP_BLOCK_TYPE_PSP, 4508 .major = 13, 4509 .minor = 0, 4510 .rev = 4, 4511 .funcs = &psp_ip_funcs, 4512 }; 4513 4514 const struct amdgpu_ip_block_version psp_v14_0_ip_block = { 4515 .type = AMD_IP_BLOCK_TYPE_PSP, 4516 .major = 14, 4517 .minor = 0, 4518 .rev = 0, 4519 .funcs = &psp_ip_funcs, 4520 }; 4521