/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Huang Rui
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "amdgpu_xgmi.h"
#include "soc15_common.h"
#include "psp_v3_1.h"
#include "psp_v10_0.h"
#include "psp_v11_0.h"
#include "psp_v11_0_8.h"
#include "psp_v12_0.h"
#include "psp_v13_0.h"
#include "psp_v13_0_4.h"
#include "psp_v14_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_securedisplay.h"
#include "amdgpu_atomfirmware.h"

#define AMD_VBIOS_FILE_MAX_SIZE_B	(1024*1024*16)

static int psp_load_smu_fw(struct psp_context *psp);
static int psp_rap_terminate(struct psp_context *psp);
static int psp_securedisplay_terminate(struct psp_context *psp);

static int psp_ring_init(struct psp_context *psp,
			 enum psp_ring_type ring_type)
{
	int ret = 0;
	struct psp_ring *ring;
	struct amdgpu_device *adev = psp->adev;

	ring = &psp->km_ring;

	ring->ring_type = ring_type;

	/* allocate 4k Page of Local Frame Buffer memory for ring */
	ring->ring_size = 0x1000;
	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->firmware.rbuf,
				      &ring->ring_mem_mc_addr,
				      (void **)&ring->ring_mem);
	if (ret) {
		ring->ring_size = 0;
		return ret;
	}

	return 0;
}

/*
 * Due to DF Cstate management centralized to PMFW, the firmware
 * loading sequence will be updated as below:
 *   - Load KDB
 *   - Load SYS_DRV
 *   - Load tOS
 *   - Load PMFW
 *   - Setup TMR
 *   - Load other non-psp fw
 *   - Load ASD
 *   - Load XGMI/RAS/HDCP/DTM TA if any
 *
 * This new sequence is required for
 *   - Arcturus and onwards
 */
static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;

	if (amdgpu_sriov_vf(adev)) {
		psp->pmfw_centralized_cstate_management = false;
		return;
	}

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 7):
		psp->pmfw_centralized_cstate_management = true;
		break;
	default:
		psp->pmfw_centralized_cstate_management = false;
		break;
	}
}

static int psp_init_sriov_microcode(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;
	char ucode_prefix[30];
	int ret = 0;

	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 9):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 2):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		ret &= psp_init_ta_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 0):
		adev->virt.autoload_ucode_id = 0;
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 14):
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		ret &= psp_init_ta_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 10):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 12):
		ret = psp_init_ta_microcode(psp, ucode_prefix);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

static int psp_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct psp_context *psp = &adev->psp;

	psp->autoload_supported = true;
	psp->boot_time_tmr = true;

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
		psp_v3_1_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		psp_v10_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
		psp_v11_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 7):
		adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
		fallthrough;
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		psp_v11_0_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		psp_v12_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(13, 0, 2):
		psp->boot_time_tmr = false;
		fallthrough;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 14):
		psp_v13_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case IP_VERSION(13, 0, 12):
		psp_v13_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
	case IP_VERSION(14, 0, 4):
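		/* these PSP versions reuse the v13.0 host interface and, as the
		 * flags below indicate, the driver sets up the TMR itself
		 * instead of relying on a boot-time TMR
		 */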
		psp_v13_0_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 8):
		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
			psp_v11_0_8_set_psp_funcs(psp);
		}
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 10):
		psp_v13_0_set_psp_funcs(psp);
		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(13, 0, 4):
		psp_v13_0_4_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
		psp_v14_0_set_psp_funcs(psp);
		break;
	case IP_VERSION(14, 0, 5):
		psp_v14_0_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	default:
		return -EINVAL;
	}

	psp->adev = adev;

	adev->psp_timeout = 20000;

	psp_check_pmfw_centralized_cstate_management(psp);

	if (amdgpu_sriov_vf(adev))
		return psp_init_sriov_microcode(psp);
	else
		return psp_init_microcode(psp);
}

void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
{
	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
			      &mem_ctx->shared_buf);
	mem_ctx->shared_bo = NULL;
}

static void psp_free_shared_bufs(struct psp_context *psp)
{
	void *tmr_buf;
	void **pptr;

	/* free TMR memory buffer */
	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
	psp->tmr_bo = NULL;

	/* free xgmi shared memory */
	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);

	/* free ras shared memory */
	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);

	/* free hdcp shared memory */
	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);

	/* free dtm shared memory */
	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);

	/* free rap shared memory */
	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);

	/* free securedisplay shared memory */
	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
}

static void psp_memory_training_fini(struct psp_context *psp)
{
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	kfree(ctx->sys_cache);
	ctx->sys_cache = NULL;
}

static int psp_memory_training_init(struct psp_context *psp)
{
	int ret;
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
		dev_dbg(psp->adev->dev, "memory training is not supported!\n");
		return 0;
	}

	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
	if (ctx->sys_cache == NULL) {
		dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
		ret = -ENOMEM;
		goto Err_out;
	}

	dev_dbg(psp->adev->dev,
		"train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
		ctx->train_data_size,
		ctx->p2c_train_data_offset,
		ctx->c2p_train_data_offset);
	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
	return 0;

Err_out:
	psp_memory_training_fini(psp);
	return ret;
}

/*
 * Helper function to query a psp runtime database entry
 *
 * @adev: amdgpu_device pointer
 * @entry_type: the type of psp runtime database entry
 * @db_entry: runtime database entry pointer
 *
 * Return false if the runtime database doesn't exist or the entry is invalid,
 * or true if the specific database entry is found and copied to @db_entry
 */
static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
				     enum psp_runtime_entry_type entry_type,
				     void *db_entry)
{
	uint64_t db_header_pos, db_dir_pos;
	struct psp_runtime_data_header db_header = {0};
	struct psp_runtime_data_directory db_dir = {0};
	bool ret = false;
	int i;

	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
		return false;

	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);

	/* read runtime db header from vram */
	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
				  sizeof(struct psp_runtime_data_header), false);

	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
		/* runtime db doesn't exist, exit */
		dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
		return false;
	}

	/* read runtime database entry from vram */
	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
				  sizeof(struct psp_runtime_data_directory), false);

	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
		/* invalid db entry count, exit */
		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
		return false;
	}

	/* look up for requested entry type */
	for (i = 0; i < db_dir.entry_count && !ret; i++) {
		if (db_dir.entry_list[i].entry_type == entry_type) {
			switch (entry_type) {
			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
					/* invalid db entry size */
					dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
					return false;
				}
				/* read runtime database entry */
				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
				ret = true;
				break;
			case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
					/* invalid db entry size */
					dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
					return false;
				}
				/* read runtime database entry */
				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
				ret = true;
				break;
			default:
				ret = false;
				break;
			}
		}
	}

	return ret;
}

static int psp_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct psp_context *psp = &adev->psp;
	int ret;
	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
	struct psp_runtime_scpm_entry scpm_entry;

	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!psp->cmd) {
		dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
		ret = -ENOMEM;
	}

	adev->psp.xgmi_context.supports_extended_data =
		!adev->gmc.xgmi.connected_to_cpu &&
		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);

	memset(&scpm_entry, 0, sizeof(scpm_entry));
	if ((psp_get_runtime_db_entry(adev,
				      PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
				      &scpm_entry)) &&
	    (scpm_entry.scpm_status != SCPM_DISABLE)) {
		adev->scpm_enabled = true;
		adev->scpm_status = scpm_entry.scpm_status;
	} else {
		adev->scpm_enabled = false;
		adev->scpm_status = SCPM_DISABLE;
	}

	/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */

	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
	if (psp_get_runtime_db_entry(adev,
				     PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
				     &boot_cfg_entry)) {
		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
		if ((psp->boot_cfg_bitmask) &
		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
			/* If psp runtime database exists, then
			 * only enable two stage memory training
			 * when TWO_STAGE_DRAM_TRAINING bit is set
			 * in runtime database
			 */
			mem_training_ctx->enable_mem_training = true;
		}

	} else {
		/* If psp runtime database doesn't exist or is
		 * invalid, force enable two stage memory training
		 */
		mem_training_ctx->enable_mem_training = true;
	}

	if (mem_training_ctx->enable_mem_training) {
		ret = psp_memory_training_init(psp);
		if (ret) {
			dev_err(adev->dev, "Failed to initialize memory training!\n");
			return ret;
		}

		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
		if (ret) {
			dev_err(adev->dev, "Failed to process memory training!\n");
			return ret;
		}
	}

	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
				      (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
				      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fw_pri_bo,
				      &psp->fw_pri_mc_addr,
				      &psp->fw_pri_buf);
	if (ret)
		return ret;

	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fence_buf_bo,
				      &psp->fence_buf_mc_addr,
				      &psp->fence_buf);
	if (ret)
		goto failed1;

	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
				      (void **)&psp->cmd_buf_mem);
	if (ret)
		goto failed2;

	return 0;

failed2:
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
failed1:
	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	return ret;
}

static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct psp_context *psp = &adev->psp;

	psp_memory_training_fini(psp);

	amdgpu_ucode_release(&psp->sos_fw);
	amdgpu_ucode_release(&psp->asd_fw);
	amdgpu_ucode_release(&psp->ta_fw);
	amdgpu_ucode_release(&psp->cap_fw);
	amdgpu_ucode_release(&psp->toc_fw);

	kfree(psp->cmd);
	psp->cmd = NULL;

	psp_free_shared_bufs(psp);

	if (psp->km_ring.ring_mem)
		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
				      &psp->km_ring.ring_mem_mc_addr,
				      (void **)&psp->km_ring.ring_mem);

	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
			      (void **)&psp->cmd_buf_mem);

	return 0;
}

int psp_wait_for(struct psp_context *psp, uint32_t reg_index, uint32_t reg_val,
		 uint32_t mask, uint32_t flags)
{
	bool check_changed = flags & PSP_WAITREG_CHANGED;
	bool verbose = !(flags & PSP_WAITREG_NOVERBOSE);
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	if (psp->adev->no_hw_access)
		return 0;

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32(reg_index);
		if (check_changed) {
			if (val != reg_val)
				return 0;
		} else {
			if ((val & mask) == reg_val)
				return 0;
		}
		udelay(1);
	}

	if (verbose)
		dev_err(adev->dev,
			"psp reg (0x%x) wait timed out, mask: %x, read: %x exp: %x",
			reg_index, mask, val, reg_val);

	return -ETIME;
}

int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
			       uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
{
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	if (psp->adev->no_hw_access)
		return 0;

	for (i = 0; i < msec_timeout; i++) {
		val = RREG32(reg_index);
		if ((val & mask) == reg_val)
			return 0;
		msleep(1);
	}

	return -ETIME;
}

static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
{
	switch (cmd_id) {
	case GFX_CMD_ID_LOAD_TA:
		return "LOAD_TA";
	case GFX_CMD_ID_UNLOAD_TA:
		return "UNLOAD_TA";
	case GFX_CMD_ID_INVOKE_CMD:
		return "INVOKE_CMD";
	case GFX_CMD_ID_LOAD_ASD:
		return "LOAD_ASD";
	case GFX_CMD_ID_SETUP_TMR:
		return "SETUP_TMR";
	case GFX_CMD_ID_LOAD_IP_FW:
		return "LOAD_IP_FW";
	case GFX_CMD_ID_DESTROY_TMR:
		return "DESTROY_TMR";
	case GFX_CMD_ID_SAVE_RESTORE:
		return "SAVE_RESTORE_IP_FW";
	case GFX_CMD_ID_SETUP_VMR:
		return "SETUP_VMR";
	case GFX_CMD_ID_DESTROY_VMR:
		return "DESTROY_VMR";
	case GFX_CMD_ID_PROG_REG:
		return "PROG_REG";
	case GFX_CMD_ID_GET_FW_ATTESTATION:
		return "GET_FW_ATTESTATION";
	case GFX_CMD_ID_LOAD_TOC:
		return "ID_LOAD_TOC";
	case GFX_CMD_ID_AUTOLOAD_RLC:
		return "AUTOLOAD_RLC";
	case GFX_CMD_ID_BOOT_CFG:
		return "BOOT_CFG";
	case GFX_CMD_ID_CONFIG_SQ_PERFMON:
		return "CONFIG_SQ_PERFMON";
	case GFX_CMD_ID_FB_FW_RESERV_ADDR:
		return "FB_FW_RESERV_ADDR";
	case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR:
		return "FB_FW_RESERV_EXT_ADDR";
	default:
		return "UNKNOWN CMD";
	}
}

static bool psp_err_warn(struct psp_context *psp)
{
	struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;

	/* This response indicates reg list is already loaded */
	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
	    cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
	    cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
	    cmd->resp.status == TEE_ERROR_CANCEL)
		return false;

	return true;
}

static int
psp_cmd_submit_buf(struct psp_context *psp,
		   struct amdgpu_firmware_info *ucode,
		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
	int ret;
	int index;
	int timeout = psp->adev->psp_timeout;
	bool ras_intr = false;
	bool skip_unsupport = false;

	if (psp->adev->no_hw_access)
		return 0;

	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);

	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));

	index = atomic_inc_return(&psp->fence_value);
	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
	if (ret) {
		atomic_dec(&psp->fence_value);
		goto exit;
	}

	amdgpu_device_invalidate_hdp(psp->adev, NULL);
	while (*((unsigned int *)psp->fence_buf) != index) {
		if (--timeout == 0)
			break;
		/*
		 * Shouldn't wait for timeout when err_event_athub occurs,
		 * because the gpu reset thread has been triggered and the lock
		 * resources should be released for the psp resume sequence.
		 */
		ras_intr = amdgpu_ras_intr_triggered();
		if (ras_intr)
			break;
		usleep_range(10, 100);
		amdgpu_device_invalidate_hdp(psp->adev, NULL);
	}

	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
			  psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);

	memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));

	/* In some cases, the psp response status is not 0 even though there is
	 * no problem while the command is submitted. Some versions of PSP FW
	 * don't write 0 to that field.
	 * So here we would like to only print a warning instead of an error
	 * during psp initialization to avoid breaking hw_init, and it doesn't
	 * return -EINVAL.
	 */
	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
		if (ucode)
			dev_warn(psp->adev->dev,
				 "failed to load ucode %s(0x%X) ",
				 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
		if (psp_err_warn(psp))
			dev_warn(
				psp->adev->dev,
				"psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
				psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
				psp->cmd_buf_mem->cmd_id,
				psp->cmd_buf_mem->resp.status);
		/* If any firmware (including CAP) load fails under SRIOV, it should
		 * return failure to stop the VF from initializing.
		 * Also return failure in case of timeout
		 */
		if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
			ret = -EINVAL;
			goto exit;
		}
	}

	if (ucode) {
		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
	}

exit:
	return ret;
}

static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
{
	struct psp_gfx_cmd_resp *cmd = psp->cmd;

	mutex_lock(&psp->mutex);

	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));

	return cmd;
}

static void release_psp_cmd_buf(struct psp_context *psp)
{
	mutex_unlock(&psp->mutex);
}

static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
				 struct psp_gfx_cmd_resp *cmd,
				 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
{
	struct amdgpu_device *adev = psp->adev;
	uint32_t size = 0;
	uint64_t tmr_pa = 0;

	if (tmr_bo) {
		size = amdgpu_bo_size(tmr_bo);
		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
	}

	if (amdgpu_sriov_vf(psp->adev))
		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_size = size;
	cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
	cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
	cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
}

static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint64_t pri_buf_mc, uint32_t size)
{
	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_size = size;
}

/* Issue LOAD TOC cmd to PSP to parse the toc and calculate the tmr size needed */
static int psp_load_toc(struct psp_context *psp,
			uint32_t *tmr_size)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	/* Copy toc to psp firmware private buffer */
	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);

	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (!ret)
		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;

	release_psp_cmd_buf(psp);

	return ret;
}

/* Set up Trusted Memory Region */
static int psp_tmr_init(struct psp_context *psp)
{
	int ret = 0;
	int tmr_size;
	void *tmr_buf;
	void **pptr;

	/*
	 * According to the HW engineers, they prefer the TMR address to be
	 * "naturally aligned", i.e. the start address is an integer multiple
	 * of the TMR size.
	 *
	 * Note: this memory needs to be reserved until the driver
	 * uninitializes.
	 */
	tmr_size = PSP_TMR_SIZE(psp->adev);

	/* For ASICs that support RLC autoload, psp will parse the toc
	 * and calculate the total size of TMR needed
	 */
	if (!amdgpu_sriov_vf(psp->adev) &&
	    psp->toc.start_addr &&
	    psp->toc.size_bytes &&
	    psp->fw_pri_buf) {
		ret = psp_load_toc(psp, &tmr_size);
		if (ret) {
			dev_err(psp->adev->dev, "Failed to load toc\n");
			return ret;
		}
	}

	if (!psp->tmr_bo && !psp->boot_time_tmr) {
		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
					      PSP_TMR_ALIGNMENT,
					      AMDGPU_HAS_VRAM(psp->adev) ?
					      AMDGPU_GEM_DOMAIN_VRAM :
					      AMDGPU_GEM_DOMAIN_GTT,
					      &psp->tmr_bo, &psp->tmr_mc_addr,
					      pptr);
	}
	if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo)
		psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo);

	return ret;
}

static bool psp_skip_tmr(struct psp_context *psp)
{
	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 12):
	case IP_VERSION(13, 0, 14):
		return true;
	default:
		return false;
	}
}

static int psp_tmr_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
	 * Already set up by host driver.
	 */
	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
	if (psp->tmr_bo)
		dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
					struct psp_gfx_cmd_resp *cmd)
{
	if (amdgpu_sriov_vf(psp->adev))
		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
}

static int psp_tmr_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
	 * as TMR is not loaded at all
	 */
	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_tmr_unload_cmd_buf(psp, cmd);
	dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_tmr_terminate(struct psp_context *psp)
{
	return psp_tmr_unload(psp);
}

int psp_get_fw_attestation_records_addr(struct psp_context *psp,
					uint64_t *output_ptr)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	if (!output_ptr)
		return -EINVAL;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	if (!ret) {
		*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
			      ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
	}

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_get_fw_reservation_info(struct psp_context *psp,
				       uint32_t cmd_id,
				       uint64_t *addr,
				       uint32_t *size)
{
	int ret;
	uint32_t status;
	struct psp_gfx_cmd_resp *cmd;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = cmd_id;

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (ret) {
		release_psp_cmd_buf(psp);
		return ret;
	}

	status = cmd->resp.status;
	if (status == PSP_ERR_UNKNOWN_COMMAND) {
		release_psp_cmd_buf(psp);
		*addr = 0;
		*size = 0;
		return 0;
	}

	*addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 |
		cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo;
	*size = cmd->resp.uresp.fw_reserve_info.reserve_size;

	release_psp_cmd_buf(psp);

	return 0;
}

int psp_update_fw_reservation(struct psp_context *psp)
{
	int ret;
	uint64_t reserv_addr, reserv_addr_ext;
	uint32_t reserv_size, reserv_size_ext, mp0_ip_ver;
	struct amdgpu_device *adev = psp->adev;

	mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0);

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	switch (mp0_ip_ver) {
	case IP_VERSION(14, 0, 2):
		if (adev->psp.sos.fw_version < 0x3b0e0d)
			return 0;
		break;

	case IP_VERSION(14, 0, 3):
		if (adev->psp.sos.fw_version < 0x3a0e14)
			return 0;
		break;

	default:
		return 0;
	}

	ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
	if (ret)
		return ret;
	ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext);
	if (ret)
		return ret;

	if (reserv_addr != adev->gmc.real_vram_size - reserv_size) {
		dev_warn(adev->dev, "reserve fw region is not valid!\n");
		return 0;
	}

	amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);

	reserv_size = roundup(reserv_size, SZ_1M);

	ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL);
	if (ret) {
		dev_err(adev->dev, "reserve fw region failed(%d)!\n", ret);
		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
		return ret;
	}

	reserv_size_ext = roundup(reserv_size_ext, SZ_1M);

	ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext,
					 &adev->mman.fw_reserved_memory_extend, NULL);
	if (ret) {
		dev_err(adev->dev, "reserve extend fw region failed(%d)!\n", ret);
		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL);
		return ret;
	}

	return 0;
}

static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
{
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (!ret) {
		*boot_cfg =
			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
	}

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
{
	int ret;
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;

	if (amdgpu_sriov_vf(adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
	cmd->cmd.boot_cfg.boot_config = boot_cfg;
	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_rl_load(struct amdgpu_device *adev)
{
	int ret;
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;

	if (!is_psp_fw_valid(psp->rl))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);

	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
	cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_memory_partition(struct psp_context *psp, int mode)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE;
	cmd->cmd.cmd_memory_part.mode = mode;

	dev_info(psp->adev->dev,
		 "Requesting %d memory partition change through PSP", mode);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (ret)
		dev_err(psp->adev->dev,
			"PSP request failed to change to NPS%d mode\n", mode);

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_spatial_partition(struct psp_context *psp, int mode)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
	cmd->cmd.cmd_spatial_part.mode = mode;

	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_asd_initialize(struct psp_context *psp)
{
	int ret;

	/* If the PSP version doesn't match the ASD version, ASD loading will fail.
	 * Add a workaround to bypass it for sriov now.
	 * TODO: add version check to make it common
	 */
	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
		return 0;

	/* bypass asd if display hardware is not available */
	if (!amdgpu_device_has_display_hardware(psp->adev) &&
	    amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
		return 0;

	psp->asd_context.mem_context.shared_mc_addr = 0;
	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
	psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;

	ret = psp_ta_load(psp, &psp->asd_context);
	if (!ret)
		psp->asd_context.initialized = true;

	return ret;
}

static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
	cmd->cmd.cmd_unload_ta.session_id = session_id;
}

int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_asd_terminate(struct psp_context *psp)
{
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->asd_context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->asd_context);
	if (!ret)
		psp->asd_context.initialized = false;

	return ret;
}

static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint32_t id, uint32_t value)
{
	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
}

int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
		    uint32_t value)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret = 0;

	if (reg >= PSP_REG_LAST)
		return -EINVAL;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (ret)
		dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);

	release_psp_cmd_buf(psp);

	return ret;
}

static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				     uint64_t ta_bin_mc,
				     struct ta_context *context)
{
	cmd->cmd_id = context->ta_load_type;
	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;

	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
		lower_32_bits(context->mem_context.shared_mc_addr);
	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
		upper_32_bits(context->mem_context.shared_mc_addr);
	cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
}

int psp_ta_init_shared_buf(struct psp_context *psp,
			   struct ta_mem_context *mem_ctx)
{
	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for ta to host memory
	 */
	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT,
				       &mem_ctx->shared_bo,
				       &mem_ctx->shared_mc_addr,
				       &mem_ctx->shared_buf);
}

static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t ta_cmd_id,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
	cmd->cmd.cmd_invoke_cmd.session_id = session_id;
	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
}

int psp_ta_invoke(struct psp_context *psp,
		  uint32_t ta_cmd_id,
		  struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_ta_load(struct psp_context *psp, struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = acquire_psp_cmd_buf(psp);

	psp_copy_fw(psp, context->bin_desc.start_addr,
		    context->bin_desc.size_bytes);

	if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) &&
	    context->mem_context.shared_bo)
		context->mem_context.shared_mc_addr =
			amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo);

	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	if (!ret)
		context->session_id = cmd->resp.session_id;

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
}

int psp_xgmi_terminate(struct psp_context *psp)
{
	int ret;
	struct amdgpu_device *adev = psp->adev;

	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
	     adev->gmc.xgmi.connected_to_cpu))
		return 0;

	if (!psp->xgmi_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->xgmi_context.context);

	psp->xgmi_context.context.initialized = false;

	return ret;
}

int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	if (!psp->ta_fw ||
	    !psp->xgmi_context.context.bin_desc.size_bytes ||
	    !psp->xgmi_context.context.bin_desc.start_addr)
		return -ENOENT;

	if (!load_ta)
		goto invoke;

	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	if (!psp->xgmi_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
		if (ret)
			return ret;
	}

	/* Load XGMI TA */
	ret = psp_ta_load(psp, &psp->xgmi_context.context);
	if (!ret)
		psp->xgmi_context.context.initialized = true;
	else
		return ret;

invoke:
	/* Initialize XGMI session */
	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
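	/* flag_extend_link_record asks the XGMI TA for extended link records
	 * when the caller requested extended data
	 */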
	xgmi_cmd->flag_extend_link_record = set_extended_data;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;

	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	/* note down the capability flag for XGMI TA */
	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;

	return ret;
}

int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;

	/* Invoke xgmi ta to get hive id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	if (ret)
		return ret;

	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;

	return 0;
}

int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;

	/* Invoke xgmi ta to get the node id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	if (ret)
		return ret;

	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;

	return 0;
}

static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
{
	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
			IP_VERSION(13, 0, 2) &&
		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
	       amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
			IP_VERSION(13, 0, 6);
}

/*
 * Chips that support extended topology information require the driver to
 * reflect topology information in the opposite direction. This is
 * because the TA has already exceeded its link record limit and if the
 * TA holds bi-directional information, the driver would have to do
 * multiple fetches instead of just two.
 */
static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
					   struct psp_xgmi_node_info node_info)
{
	struct amdgpu_device *mirror_adev;
	struct amdgpu_hive_info *hive;
	uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
	uint64_t dst_node_id = node_info.node_id;
	uint8_t dst_num_hops = node_info.num_hops;
	uint8_t dst_num_links = node_info.num_links;

	hive = amdgpu_get_xgmi_hive(psp->adev);
	if (WARN_ON(!hive))
		return;

	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
		struct psp_xgmi_topology_info *mirror_top_info;
		int j;

		if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
			continue;

		mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
		for (j = 0; j < mirror_top_info->num_nodes; j++) {
			if (mirror_top_info->nodes[j].node_id != src_node_id)
				continue;

			mirror_top_info->nodes[j].num_hops = dst_num_hops;
			/*
			 * prevent 0 num_links value re-reflection since reflection
			 * criteria is based on num_hops (direct or indirect).
			 */
			if (dst_num_links)
				mirror_top_info->nodes[j].num_links = dst_num_links;

			break;
		}

		break;
	}

	amdgpu_put_xgmi_hive(hive);
}

int psp_xgmi_get_topology_info(struct psp_context *psp,
			       int number_devices,
			       struct psp_xgmi_topology_info *topology,
			       bool get_extended_data)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
	int i;
	int ret;

	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
	xgmi_cmd->flag_extend_link_record = get_extended_data;

	/* Fill in the shared memory with topology information as input */
	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
	topology_info_input->num_nodes = number_devices;

	for (i = 0; i < topology_info_input->num_nodes; i++) {
		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
	}

	/* Invoke xgmi ta to get the topology information */
	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
	if (ret)
		return ret;

	/* Read the output topology information from the shared memory */
	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
	for (i = 0; i < topology->num_nodes; i++) {
		/* extended data will either be 0 or equal to non-extended data */
		if (topology_info_output->nodes[i].num_hops)
			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;

		/* non-extended data gets everything here so no need to update */
		if (!get_extended_data) {
			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
			topology->nodes[i].is_sharing_enabled =
					topology_info_output->nodes[i].is_sharing_enabled;
			topology->nodes[i].sdma_engine =
					topology_info_output->nodes[i].sdma_engine;
		}

	}

	/* Invoke xgmi ta again to get the link information */
	if (psp_xgmi_peer_link_info_supported(psp)) {
		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
		bool requires_reflection =
			(psp->xgmi_context.supports_extended_data &&
			 get_extended_data) ||
			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
				IP_VERSION(13, 0, 6) ||
			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
				IP_VERSION(13, 0, 14);
		bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
				psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;

		/* populate the shared output buffer rather than the cmd input buffer
		 * with node_ids as the input for GET_PEER_LINKS command execution.
		 * This is required for GET_PEER_LINKS per xgmi ta implementation.
		 * The same requirement applies to the GET_EXTEND_PEER_LINKS command.
		 */
		if (ta_port_num_support) {
			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;

			for (i = 0; i < topology->num_nodes; i++)
				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;

			link_extend_info_output->num_nodes = topology->num_nodes;
			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
		} else {
			link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;

			for (i = 0; i < topology->num_nodes; i++)
				link_info_output->nodes[i].node_id = topology->nodes[i].node_id;

			link_info_output->num_nodes = topology->num_nodes;
			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
		}

		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
		if (ret)
			return ret;

		for (i = 0; i < topology->num_nodes; i++) {
			uint8_t node_num_links = ta_port_num_support ?
				link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
			/* accumulate num_links on extended data */
			if (get_extended_data) {
				topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
			} else {
				topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
						topology->nodes[i].num_links : node_num_links;
			}
			/* populate the connected port num info if supported and available */
			if (ta_port_num_support && topology->nodes[i].num_links) {
				memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
				       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
			}

			/* reflect the topology information for bi-directionality */
			if (requires_reflection && topology->nodes[i].num_hops)
				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
		}
	}

	return 0;
}

int psp_xgmi_set_topology_info(struct psp_context *psp,
			       int number_devices,
			       struct psp_xgmi_topology_info *topology)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	int i;

	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
	topology_info_input->num_nodes = number_devices;

	for (i = 0; i < topology_info_input->num_nodes; i++) {
		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
		topology_info_input->nodes[i].is_sharing_enabled = 1;
		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
	}

	/* Invoke xgmi ta to set topology information */
	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
}

// ras begin
static void psp_ras_ta_check_status(struct psp_context *psp)
{
	struct ta_ras_shared_memory *ras_cmd =
		(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;

	switch (ras_cmd->ras_status) {
	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: cmd failed due to unsupported ip\n");
		break;
	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: cmd failed due to unsupported error injection\n");
		break;
	case TA_RAS_STATUS__SUCCESS:
		break;
	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
			dev_warn(psp->adev->dev,
				 "RAS WARNING: Inject error to critical region is not allowed\n");
		break;
	default:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
		break;
	}
}

static int psp_ras_send_cmd(struct psp_context *psp,
			    enum ras_command cmd_id, void *in, void *out)
{
	struct ta_ras_shared_memory *ras_cmd;
	uint32_t cmd = cmd_id;
	int ret = 0;

	if (!in)
		return -EINVAL;

	mutex_lock(&psp->ras_context.mutex);
	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	switch (cmd) {
	case TA_RAS_COMMAND__ENABLE_FEATURES:
	case TA_RAS_COMMAND__DISABLE_FEATURES:
		memcpy(&ras_cmd->ras_in_message,
		       in, sizeof(ras_cmd->ras_in_message));
		break;
	case TA_RAS_COMMAND__TRIGGER_ERROR:
		memcpy(&ras_cmd->ras_in_message.trigger_error,
		       in, sizeof(ras_cmd->ras_in_message.trigger_error));
		break;
	case TA_RAS_COMMAND__QUERY_ADDRESS:
		memcpy(&ras_cmd->ras_in_message.address,
		       in, sizeof(ras_cmd->ras_in_message.address));
		break;
	default:
		dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
		ret = -EINVAL;
		goto err_out;
	}

	ras_cmd->cmd_id = cmd;
	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);

	switch (cmd) {
	case TA_RAS_COMMAND__TRIGGER_ERROR:
		if (!ret && out)
			memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
		break;
	case TA_RAS_COMMAND__QUERY_ADDRESS:
		if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
			ret = -EINVAL;
		else if (out)
			memcpy(out,
			       &ras_cmd->ras_out_message.address,
			       sizeof(ras_cmd->ras_out_message.address));
		break;
	default:
		break;
	}

err_out:
	mutex_unlock(&psp->ras_context.mutex);

	return ret;
}

int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);

	if (amdgpu_ras_intr_triggered())
		return ret;

	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
		dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
		return -EINVAL;
	}

	if (!ret) {
		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
			dev_warn(psp->adev->dev, "ECC switch disabled\n");

			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
			dev_warn(psp->adev->dev,
				 "RAS internal register access blocked\n");

		psp_ras_ta_check_status(psp);
	}

	return ret;
}

int psp_ras_enable_features(struct psp_context *psp,
			    union ta_ras_cmd_input *info, bool enable)
{
	enum ras_command cmd_id;
	int ret;

	if (!psp->ras_context.context.initialized ||
!info) 1859 return -EINVAL; 1860 1861 cmd_id = enable ? 1862 TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES; 1863 ret = psp_ras_send_cmd(psp, cmd_id, info, NULL); 1864 if (ret) 1865 return -EINVAL; 1866 1867 return 0; 1868 } 1869 1870 int psp_ras_terminate(struct psp_context *psp) 1871 { 1872 int ret; 1873 1874 /* 1875 * TODO: bypass the terminate in sriov for now 1876 */ 1877 if (amdgpu_sriov_vf(psp->adev)) 1878 return 0; 1879 1880 if (!psp->ras_context.context.initialized) 1881 return 0; 1882 1883 ret = psp_ta_unload(psp, &psp->ras_context.context); 1884 1885 psp->ras_context.context.initialized = false; 1886 1887 mutex_destroy(&psp->ras_context.mutex); 1888 1889 return ret; 1890 } 1891 1892 int psp_ras_initialize(struct psp_context *psp) 1893 { 1894 int ret; 1895 uint32_t boot_cfg = 0xFF; 1896 struct amdgpu_device *adev = psp->adev; 1897 struct ta_ras_shared_memory *ras_cmd; 1898 1899 /* 1900 * TODO: bypass the initialize in sriov for now 1901 */ 1902 if (amdgpu_sriov_vf(adev)) 1903 return 0; 1904 1905 if (!adev->psp.ras_context.context.bin_desc.size_bytes || 1906 !adev->psp.ras_context.context.bin_desc.start_addr) { 1907 dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n"); 1908 return 0; 1909 } 1910 1911 if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) { 1912 /* query GECC enablement status from boot config 1913 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled 1914 */ 1915 ret = psp_boot_config_get(adev, &boot_cfg); 1916 if (ret) 1917 dev_warn(adev->dev, "PSP get boot config failed\n"); 1918 1919 if (boot_cfg == 1 && !adev->ras_default_ecc_enabled && 1920 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) { 1921 dev_warn(adev->dev, "GECC is currently enabled, which may affect performance\n"); 1922 dev_warn(adev->dev, 1923 "To disable GECC, please reboot the system and load the amdgpu driver with the parameter amdgpu_ras_enable=0\n"); 1924 } else { 1925 if ((adev->ras_default_ecc_enabled || amdgpu_ras_enable == 1) && 1926 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) { 1927 if (boot_cfg == 1) { 1928 dev_info(adev->dev, "GECC is enabled\n"); 1929 } else { 1930 /* enable GECC in next boot cycle if it is disabled 1931 * in boot config, or force enable GECC if failed to 1932 * get boot configuration 1933 */ 1934 ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC); 1935 if (ret) 1936 dev_warn(adev->dev, "PSP set boot config failed\n"); 1937 else 1938 dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n"); 1939 } 1940 } else { 1941 if (!boot_cfg) { 1942 if (!adev->ras_default_ecc_enabled && 1943 amdgpu_ras_enable != 1 && 1944 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) 1945 dev_warn(adev->dev, "GECC is disabled, set amdgpu_ras_enable=1 to enable GECC in next boot cycle if needed\n"); 1946 else 1947 dev_info(adev->dev, "GECC is disabled\n"); 1948 } else { 1949 /* disable GECC in next boot cycle if ras is 1950 * disabled by module parameter amdgpu_ras_enable 1951 * and/or amdgpu_ras_mask, or boot_config_get call 1952 * is failed 1953 */ 1954 ret = psp_boot_config_set(adev, 0); 1955 if (ret) 1956 dev_warn(adev->dev, "PSP set boot config failed\n"); 1957 else 1958 dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n"); 1959 } 1960 } 1961 } 1962 } 1963 1964 psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE; 1965 psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1966 1967 if 
(!psp->ras_context.context.mem_context.shared_buf) { 1968 ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context); 1969 if (ret) 1970 return ret; 1971 } 1972 1973 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1974 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1975 1976 if (amdgpu_ras_is_poison_mode_supported(adev)) 1977 ras_cmd->ras_in_message.init_flags.poison_mode_en = 1; 1978 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) 1979 ras_cmd->ras_in_message.init_flags.dgpu_mode = 1; 1980 ras_cmd->ras_in_message.init_flags.xcc_mask = 1981 adev->gfx.xcc_mask; 1982 ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2; 1983 if (adev->gmc.gmc_funcs->query_mem_partition_mode) 1984 ras_cmd->ras_in_message.init_flags.nps_mode = 1985 adev->gmc.gmc_funcs->query_mem_partition_mode(adev); 1986 ras_cmd->ras_in_message.init_flags.active_umc_mask = adev->umc.active_mask; 1987 1988 ret = psp_ta_load(psp, &psp->ras_context.context); 1989 1990 if (!ret && !ras_cmd->ras_status) { 1991 psp->ras_context.context.initialized = true; 1992 mutex_init(&psp->ras_context.mutex); 1993 } else { 1994 if (ras_cmd->ras_status) 1995 dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status); 1996 1997 /* failed to load RAS TA */ 1998 psp->ras_context.context.initialized = false; 1999 } 2000 2001 return ret; 2002 } 2003 2004 int psp_ras_trigger_error(struct psp_context *psp, 2005 struct ta_ras_trigger_error_input *info, uint32_t instance_mask) 2006 { 2007 struct amdgpu_device *adev = psp->adev; 2008 int ret; 2009 uint32_t dev_mask; 2010 uint32_t ras_status = 0; 2011 2012 if (!psp->ras_context.context.initialized || !info) 2013 return -EINVAL; 2014 2015 switch (info->block_id) { 2016 case TA_RAS_BLOCK__GFX: 2017 dev_mask = GET_MASK(GC, instance_mask); 2018 break; 2019 case TA_RAS_BLOCK__SDMA: 2020 dev_mask = GET_MASK(SDMA0, instance_mask); 2021 break; 2022 case TA_RAS_BLOCK__VCN: 2023 case TA_RAS_BLOCK__JPEG: 2024 dev_mask = GET_MASK(VCN, instance_mask); 2025 break; 2026 default: 2027 dev_mask = instance_mask; 2028 break; 2029 } 2030 2031 /* reuse sub_block_index for backward compatibility */ 2032 dev_mask <<= AMDGPU_RAS_INST_SHIFT; 2033 dev_mask &= AMDGPU_RAS_INST_MASK; 2034 info->sub_block_index |= dev_mask; 2035 2036 ret = psp_ras_send_cmd(psp, 2037 TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status); 2038 if (ret) 2039 return -EINVAL; 2040 2041 /* If err_event_athub occurs, the error injection was successful; however, 2042 * the return status from the TA is no longer reliable 2043 */ 2044 if (amdgpu_ras_intr_triggered()) 2045 return 0; 2046 2047 if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED) 2048 return -EACCES; 2049 else if (ras_status) 2050 return -EINVAL; 2051 2052 return 0; 2053 } 2054 2055 int psp_ras_query_address(struct psp_context *psp, 2056 struct ta_ras_query_address_input *addr_in, 2057 struct ta_ras_query_address_output *addr_out) 2058 { 2059 int ret; 2060 2061 if (!psp->ras_context.context.initialized || 2062 !addr_in || !addr_out) 2063 return -EINVAL; 2064 2065 ret = psp_ras_send_cmd(psp, 2066 TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out); 2067 2068 return ret; 2069 } 2070 // ras end 2071 2072 // HDCP start 2073 static int psp_hdcp_initialize(struct psp_context *psp) 2074 { 2075 int ret; 2076 2077 /* 2078 * TODO: bypass the initialize in sriov for now 2079 */ 2080 if (amdgpu_sriov_vf(psp->adev)) 2081 return 0; 2082 2083 /* bypass hdcp initialization if dmu is harvested */ 2084 if
(!amdgpu_device_has_display_hardware(psp->adev)) 2085 return 0; 2086 2087 if (!psp->hdcp_context.context.bin_desc.size_bytes || 2088 !psp->hdcp_context.context.bin_desc.start_addr) { 2089 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n"); 2090 return 0; 2091 } 2092 2093 psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE; 2094 psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2095 2096 if (!psp->hdcp_context.context.mem_context.shared_buf) { 2097 ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context); 2098 if (ret) 2099 return ret; 2100 } 2101 2102 ret = psp_ta_load(psp, &psp->hdcp_context.context); 2103 if (!ret) { 2104 psp->hdcp_context.context.initialized = true; 2105 mutex_init(&psp->hdcp_context.mutex); 2106 } 2107 2108 return ret; 2109 } 2110 2111 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2112 { 2113 /* 2114 * TODO: bypass the loading in sriov for now 2115 */ 2116 if (amdgpu_sriov_vf(psp->adev)) 2117 return 0; 2118 2119 if (!psp->hdcp_context.context.initialized) 2120 return 0; 2121 2122 return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context); 2123 } 2124 2125 static int psp_hdcp_terminate(struct psp_context *psp) 2126 { 2127 int ret; 2128 2129 /* 2130 * TODO: bypass the terminate in sriov for now 2131 */ 2132 if (amdgpu_sriov_vf(psp->adev)) 2133 return 0; 2134 2135 if (!psp->hdcp_context.context.initialized) 2136 return 0; 2137 2138 ret = psp_ta_unload(psp, &psp->hdcp_context.context); 2139 2140 psp->hdcp_context.context.initialized = false; 2141 2142 return ret; 2143 } 2144 // HDCP end 2145 2146 // DTM start 2147 static int psp_dtm_initialize(struct psp_context *psp) 2148 { 2149 int ret; 2150 2151 /* 2152 * TODO: bypass the initialize in sriov for now 2153 */ 2154 if (amdgpu_sriov_vf(psp->adev)) 2155 return 0; 2156 2157 /* bypass dtm initialization if dmu is harvested */ 2158 if (!amdgpu_device_has_display_hardware(psp->adev)) 2159 return 0; 2160 2161 if (!psp->dtm_context.context.bin_desc.size_bytes || 2162 !psp->dtm_context.context.bin_desc.start_addr) { 2163 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n"); 2164 return 0; 2165 } 2166 2167 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE; 2168 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2169 2170 if (!psp->dtm_context.context.mem_context.shared_buf) { 2171 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context); 2172 if (ret) 2173 return ret; 2174 } 2175 2176 ret = psp_ta_load(psp, &psp->dtm_context.context); 2177 if (!ret) { 2178 psp->dtm_context.context.initialized = true; 2179 mutex_init(&psp->dtm_context.mutex); 2180 } 2181 2182 return ret; 2183 } 2184 2185 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2186 { 2187 /* 2188 * TODO: bypass the loading in sriov for now 2189 */ 2190 if (amdgpu_sriov_vf(psp->adev)) 2191 return 0; 2192 2193 if (!psp->dtm_context.context.initialized) 2194 return 0; 2195 2196 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context); 2197 } 2198 2199 static int psp_dtm_terminate(struct psp_context *psp) 2200 { 2201 int ret; 2202 2203 /* 2204 * TODO: bypass the terminate in sriov for now 2205 */ 2206 if (amdgpu_sriov_vf(psp->adev)) 2207 return 0; 2208 2209 if (!psp->dtm_context.context.initialized) 2210 return 0; 2211 2212 ret = psp_ta_unload(psp, &psp->dtm_context.context); 2213 2214 psp->dtm_context.context.initialized = false; 2215 2216 return ret; 2217 } 2218 // DTM end 
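/* Descriptive note (added): RAP (Register Access Policy) TA. psp_rap_initialize() below loads the optional RAP TA, issues TA_CMD_RAP__INITIALIZE, and if that command fails it tears the TA down again (terminate plus shared-buffer free) so the rest of PSP bring-up can continue with only a warning. */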
2219 2220 // RAP start 2221 static int psp_rap_initialize(struct psp_context *psp) 2222 { 2223 int ret; 2224 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS; 2225 2226 /* 2227 * TODO: bypass the initialize in sriov for now 2228 */ 2229 if (amdgpu_sriov_vf(psp->adev)) 2230 return 0; 2231 2232 if (!psp->rap_context.context.bin_desc.size_bytes || 2233 !psp->rap_context.context.bin_desc.start_addr) { 2234 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n"); 2235 return 0; 2236 } 2237 2238 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE; 2239 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2240 2241 if (!psp->rap_context.context.mem_context.shared_buf) { 2242 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context); 2243 if (ret) 2244 return ret; 2245 } 2246 2247 ret = psp_ta_load(psp, &psp->rap_context.context); 2248 if (!ret) { 2249 psp->rap_context.context.initialized = true; 2250 mutex_init(&psp->rap_context.mutex); 2251 } else 2252 return ret; 2253 2254 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status); 2255 if (ret || status != TA_RAP_STATUS__SUCCESS) { 2256 psp_rap_terminate(psp); 2257 /* free rap shared memory */ 2258 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); 2259 2260 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n", 2261 ret, status); 2262 2263 return ret; 2264 } 2265 2266 return 0; 2267 } 2268 2269 static int psp_rap_terminate(struct psp_context *psp) 2270 { 2271 int ret; 2272 2273 if (!psp->rap_context.context.initialized) 2274 return 0; 2275 2276 ret = psp_ta_unload(psp, &psp->rap_context.context); 2277 2278 psp->rap_context.context.initialized = false; 2279 2280 return ret; 2281 } 2282 2283 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status) 2284 { 2285 struct ta_rap_shared_memory *rap_cmd; 2286 int ret = 0; 2287 2288 if (!psp->rap_context.context.initialized) 2289 return 0; 2290 2291 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE && 2292 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0) 2293 return -EINVAL; 2294 2295 mutex_lock(&psp->rap_context.mutex); 2296 2297 rap_cmd = (struct ta_rap_shared_memory *) 2298 psp->rap_context.context.mem_context.shared_buf; 2299 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory)); 2300 2301 rap_cmd->cmd_id = ta_cmd_id; 2302 rap_cmd->validation_method_id = METHOD_A; 2303 2304 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context); 2305 if (ret) 2306 goto out_unlock; 2307 2308 if (status) 2309 *status = rap_cmd->rap_status; 2310 2311 out_unlock: 2312 mutex_unlock(&psp->rap_context.mutex); 2313 2314 return ret; 2315 } 2316 // RAP end 2317 2318 /* securedisplay start */ 2319 static int psp_securedisplay_initialize(struct psp_context *psp) 2320 { 2321 int ret; 2322 struct ta_securedisplay_cmd *securedisplay_cmd; 2323 2324 /* 2325 * TODO: bypass the initialize in sriov for now 2326 */ 2327 if (amdgpu_sriov_vf(psp->adev)) 2328 return 0; 2329 2330 /* bypass securedisplay initialization if dmu is harvested */ 2331 if (!amdgpu_device_has_display_hardware(psp->adev)) 2332 return 0; 2333 2334 if (!psp->securedisplay_context.context.bin_desc.size_bytes || 2335 !psp->securedisplay_context.context.bin_desc.start_addr) { 2336 dev_info(psp->adev->dev, 2337 "SECUREDISPLAY: optional securedisplay ta ucode is not available\n"); 2338 return 0; 2339 } 2340 2341 psp->securedisplay_context.context.mem_context.shared_mem_size = 2342 PSP_SECUREDISPLAY_SHARED_MEM_SIZE; 2343 
psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2344 2345 if (!psp->securedisplay_context.context.initialized) { 2346 ret = psp_ta_init_shared_buf(psp, 2347 &psp->securedisplay_context.context.mem_context); 2348 if (ret) 2349 return ret; 2350 } 2351 2352 ret = psp_ta_load(psp, &psp->securedisplay_context.context); 2353 if (!ret) { 2354 psp->securedisplay_context.context.initialized = true; 2355 mutex_init(&psp->securedisplay_context.mutex); 2356 } else 2357 return ret; 2358 2359 mutex_lock(&psp->securedisplay_context.mutex); 2360 2361 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, 2362 TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2363 2364 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2365 2366 mutex_unlock(&psp->securedisplay_context.mutex); 2367 2368 if (ret) { 2369 psp_securedisplay_terminate(psp); 2370 /* free securedisplay shared memory */ 2371 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); 2372 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n"); 2373 return -EINVAL; 2374 } 2375 2376 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { 2377 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); 2378 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n", 2379 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret); 2380 /* don't try again */ 2381 psp->securedisplay_context.context.bin_desc.size_bytes = 0; 2382 } 2383 2384 return 0; 2385 } 2386 2387 static int psp_securedisplay_terminate(struct psp_context *psp) 2388 { 2389 int ret; 2390 2391 /* 2392 * TODO:bypass the terminate in sriov for now 2393 */ 2394 if (amdgpu_sriov_vf(psp->adev)) 2395 return 0; 2396 2397 if (!psp->securedisplay_context.context.initialized) 2398 return 0; 2399 2400 ret = psp_ta_unload(psp, &psp->securedisplay_context.context); 2401 2402 psp->securedisplay_context.context.initialized = false; 2403 2404 return ret; 2405 } 2406 2407 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2408 { 2409 int ret; 2410 2411 if (!psp->securedisplay_context.context.initialized) 2412 return -EINVAL; 2413 2414 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA && 2415 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC && 2416 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2) 2417 return -EINVAL; 2418 2419 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context); 2420 2421 return ret; 2422 } 2423 /* SECUREDISPLAY end */ 2424 2425 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev) 2426 { 2427 struct psp_context *psp = &adev->psp; 2428 int ret = 0; 2429 2430 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL) 2431 ret = psp->funcs->wait_for_bootloader(psp); 2432 2433 return ret; 2434 } 2435 2436 bool amdgpu_psp_get_ras_capability(struct psp_context *psp) 2437 { 2438 if (psp->funcs && 2439 psp->funcs->get_ras_capability) { 2440 return psp->funcs->get_ras_capability(psp); 2441 } else { 2442 return false; 2443 } 2444 } 2445 2446 bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev) 2447 { 2448 struct psp_context *psp = &adev->psp; 2449 2450 if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) 2451 return false; 2452 2453 if (psp->funcs && psp->funcs->is_reload_needed) 2454 return psp->funcs->is_reload_needed(psp); 2455 2456 return false; 2457 } 2458 2459 static void psp_update_gpu_addresses(struct amdgpu_device *adev) 2460 { 2461 struct psp_context *psp = &adev->psp; 2462 2463 
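/* Descriptive note (added): recompute the MC addresses of the PSP private fw, fence, command and ring buffers from the FB aperture; called from psp_hw_start() when virtual XGMI migration is enabled. */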
if (psp->cmd_buf_bo && psp->cmd_buf_mem) { 2464 psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo); 2465 psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo); 2466 psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo); 2467 } 2468 if (adev->firmware.rbuf && psp->km_ring.ring_mem) 2469 psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf); 2470 } 2471 2472 static int psp_hw_start(struct psp_context *psp) 2473 { 2474 struct amdgpu_device *adev = psp->adev; 2475 int ret; 2476 2477 if (amdgpu_virt_xgmi_migrate_enabled(adev)) 2478 psp_update_gpu_addresses(adev); 2479 2480 if (!amdgpu_sriov_vf(adev)) { 2481 if ((is_psp_fw_valid(psp->kdb)) && 2482 (psp->funcs->bootloader_load_kdb != NULL)) { 2483 ret = psp_bootloader_load_kdb(psp); 2484 if (ret) { 2485 dev_err(adev->dev, "PSP load kdb failed!\n"); 2486 return ret; 2487 } 2488 } 2489 2490 if ((is_psp_fw_valid(psp->spl)) && 2491 (psp->funcs->bootloader_load_spl != NULL)) { 2492 ret = psp_bootloader_load_spl(psp); 2493 if (ret) { 2494 dev_err(adev->dev, "PSP load spl failed!\n"); 2495 return ret; 2496 } 2497 } 2498 2499 if ((is_psp_fw_valid(psp->sys)) && 2500 (psp->funcs->bootloader_load_sysdrv != NULL)) { 2501 ret = psp_bootloader_load_sysdrv(psp); 2502 if (ret) { 2503 dev_err(adev->dev, "PSP load sys drv failed!\n"); 2504 return ret; 2505 } 2506 } 2507 2508 if ((is_psp_fw_valid(psp->soc_drv)) && 2509 (psp->funcs->bootloader_load_soc_drv != NULL)) { 2510 ret = psp_bootloader_load_soc_drv(psp); 2511 if (ret) { 2512 dev_err(adev->dev, "PSP load soc drv failed!\n"); 2513 return ret; 2514 } 2515 } 2516 2517 if ((is_psp_fw_valid(psp->intf_drv)) && 2518 (psp->funcs->bootloader_load_intf_drv != NULL)) { 2519 ret = psp_bootloader_load_intf_drv(psp); 2520 if (ret) { 2521 dev_err(adev->dev, "PSP load intf drv failed!\n"); 2522 return ret; 2523 } 2524 } 2525 2526 if ((is_psp_fw_valid(psp->dbg_drv)) && 2527 (psp->funcs->bootloader_load_dbg_drv != NULL)) { 2528 ret = psp_bootloader_load_dbg_drv(psp); 2529 if (ret) { 2530 dev_err(adev->dev, "PSP load dbg drv failed!\n"); 2531 return ret; 2532 } 2533 } 2534 2535 if ((is_psp_fw_valid(psp->ras_drv)) && 2536 (psp->funcs->bootloader_load_ras_drv != NULL)) { 2537 ret = psp_bootloader_load_ras_drv(psp); 2538 if (ret) { 2539 dev_err(adev->dev, "PSP load ras_drv failed!\n"); 2540 return ret; 2541 } 2542 } 2543 2544 if ((is_psp_fw_valid(psp->ipkeymgr_drv)) && 2545 (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) { 2546 ret = psp_bootloader_load_ipkeymgr_drv(psp); 2547 if (ret) { 2548 dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n"); 2549 return ret; 2550 } 2551 } 2552 2553 if ((is_psp_fw_valid(psp->spdm_drv)) && 2554 (psp->funcs->bootloader_load_spdm_drv != NULL)) { 2555 ret = psp_bootloader_load_spdm_drv(psp); 2556 if (ret) { 2557 dev_err(adev->dev, "PSP load spdm_drv failed!\n"); 2558 return ret; 2559 } 2560 } 2561 2562 if ((is_psp_fw_valid(psp->sos)) && 2563 (psp->funcs->bootloader_load_sos != NULL)) { 2564 ret = psp_bootloader_load_sos(psp); 2565 if (ret) { 2566 dev_err(adev->dev, "PSP load sos failed!\n"); 2567 return ret; 2568 } 2569 } 2570 } 2571 2572 ret = psp_ring_create(psp, PSP_RING_TYPE__KM); 2573 if (ret) { 2574 dev_err(adev->dev, "PSP create ring failed!\n"); 2575 return ret; 2576 } 2577 2578 if (!amdgpu_in_reset(adev) && !adev->in_suspend) { 2579 ret = psp_update_fw_reservation(psp); 2580 if (ret) { 2581 dev_err(adev->dev, "update fw reservation failed!\n"); 2582 return ret; 2583 } 2584 } 2585 2586 if (amdgpu_sriov_vf(adev) && 
amdgpu_in_reset(adev)) 2587 goto skip_pin_bo; 2588 2589 if (!psp->boot_time_tmr || psp->autoload_supported) { 2590 ret = psp_tmr_init(psp); 2591 if (ret) { 2592 dev_err(adev->dev, "PSP tmr init failed!\n"); 2593 return ret; 2594 } 2595 } 2596 2597 skip_pin_bo: 2598 /* 2599 * For ASICs with DF Cstate management centralized 2600 * to PMFW, TMR setup should be performed after PMFW 2601 * loaded and before other non-psp firmware loaded. 2602 */ 2603 if (psp->pmfw_centralized_cstate_management) { 2604 ret = psp_load_smu_fw(psp); 2605 if (ret) 2606 return ret; 2607 } 2608 2609 if (!psp->boot_time_tmr || !psp->autoload_supported) { 2610 ret = psp_tmr_load(psp); 2611 if (ret) { 2612 dev_err(adev->dev, "PSP load tmr failed!\n"); 2613 return ret; 2614 } 2615 } 2616 2617 return 0; 2618 } 2619 2620 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, 2621 enum psp_gfx_fw_type *type) 2622 { 2623 switch (ucode->ucode_id) { 2624 case AMDGPU_UCODE_ID_CAP: 2625 *type = GFX_FW_TYPE_CAP; 2626 break; 2627 case AMDGPU_UCODE_ID_SDMA0: 2628 *type = GFX_FW_TYPE_SDMA0; 2629 break; 2630 case AMDGPU_UCODE_ID_SDMA1: 2631 *type = GFX_FW_TYPE_SDMA1; 2632 break; 2633 case AMDGPU_UCODE_ID_SDMA2: 2634 *type = GFX_FW_TYPE_SDMA2; 2635 break; 2636 case AMDGPU_UCODE_ID_SDMA3: 2637 *type = GFX_FW_TYPE_SDMA3; 2638 break; 2639 case AMDGPU_UCODE_ID_SDMA4: 2640 *type = GFX_FW_TYPE_SDMA4; 2641 break; 2642 case AMDGPU_UCODE_ID_SDMA5: 2643 *type = GFX_FW_TYPE_SDMA5; 2644 break; 2645 case AMDGPU_UCODE_ID_SDMA6: 2646 *type = GFX_FW_TYPE_SDMA6; 2647 break; 2648 case AMDGPU_UCODE_ID_SDMA7: 2649 *type = GFX_FW_TYPE_SDMA7; 2650 break; 2651 case AMDGPU_UCODE_ID_CP_MES: 2652 *type = GFX_FW_TYPE_CP_MES; 2653 break; 2654 case AMDGPU_UCODE_ID_CP_MES_DATA: 2655 *type = GFX_FW_TYPE_MES_STACK; 2656 break; 2657 case AMDGPU_UCODE_ID_CP_MES1: 2658 *type = GFX_FW_TYPE_CP_MES_KIQ; 2659 break; 2660 case AMDGPU_UCODE_ID_CP_MES1_DATA: 2661 *type = GFX_FW_TYPE_MES_KIQ_STACK; 2662 break; 2663 case AMDGPU_UCODE_ID_CP_CE: 2664 *type = GFX_FW_TYPE_CP_CE; 2665 break; 2666 case AMDGPU_UCODE_ID_CP_PFP: 2667 *type = GFX_FW_TYPE_CP_PFP; 2668 break; 2669 case AMDGPU_UCODE_ID_CP_ME: 2670 *type = GFX_FW_TYPE_CP_ME; 2671 break; 2672 case AMDGPU_UCODE_ID_CP_MEC1: 2673 *type = GFX_FW_TYPE_CP_MEC; 2674 break; 2675 case AMDGPU_UCODE_ID_CP_MEC1_JT: 2676 *type = GFX_FW_TYPE_CP_MEC_ME1; 2677 break; 2678 case AMDGPU_UCODE_ID_CP_MEC2: 2679 *type = GFX_FW_TYPE_CP_MEC; 2680 break; 2681 case AMDGPU_UCODE_ID_CP_MEC2_JT: 2682 *type = GFX_FW_TYPE_CP_MEC_ME2; 2683 break; 2684 case AMDGPU_UCODE_ID_RLC_P: 2685 *type = GFX_FW_TYPE_RLC_P; 2686 break; 2687 case AMDGPU_UCODE_ID_RLC_V: 2688 *type = GFX_FW_TYPE_RLC_V; 2689 break; 2690 case AMDGPU_UCODE_ID_RLC_G: 2691 *type = GFX_FW_TYPE_RLC_G; 2692 break; 2693 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: 2694 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL; 2695 break; 2696 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: 2697 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM; 2698 break; 2699 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: 2700 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; 2701 break; 2702 case AMDGPU_UCODE_ID_RLC_IRAM: 2703 *type = GFX_FW_TYPE_RLC_IRAM; 2704 break; 2705 case AMDGPU_UCODE_ID_RLC_DRAM: 2706 *type = GFX_FW_TYPE_RLC_DRAM_BOOT; 2707 break; 2708 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS: 2709 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS; 2710 break; 2711 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS: 2712 *type = GFX_FW_TYPE_SE0_TAP_DELAYS; 2713 break; 2714 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS: 2715 *type = GFX_FW_TYPE_SE1_TAP_DELAYS; 
2716 break; 2717 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS: 2718 *type = GFX_FW_TYPE_SE2_TAP_DELAYS; 2719 break; 2720 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS: 2721 *type = GFX_FW_TYPE_SE3_TAP_DELAYS; 2722 break; 2723 case AMDGPU_UCODE_ID_SMC: 2724 *type = GFX_FW_TYPE_SMU; 2725 break; 2726 case AMDGPU_UCODE_ID_PPTABLE: 2727 *type = GFX_FW_TYPE_PPTABLE; 2728 break; 2729 case AMDGPU_UCODE_ID_UVD: 2730 *type = GFX_FW_TYPE_UVD; 2731 break; 2732 case AMDGPU_UCODE_ID_UVD1: 2733 *type = GFX_FW_TYPE_UVD1; 2734 break; 2735 case AMDGPU_UCODE_ID_VCE: 2736 *type = GFX_FW_TYPE_VCE; 2737 break; 2738 case AMDGPU_UCODE_ID_VCN: 2739 *type = GFX_FW_TYPE_VCN; 2740 break; 2741 case AMDGPU_UCODE_ID_VCN1: 2742 *type = GFX_FW_TYPE_VCN1; 2743 break; 2744 case AMDGPU_UCODE_ID_DMCU_ERAM: 2745 *type = GFX_FW_TYPE_DMCU_ERAM; 2746 break; 2747 case AMDGPU_UCODE_ID_DMCU_INTV: 2748 *type = GFX_FW_TYPE_DMCU_ISR; 2749 break; 2750 case AMDGPU_UCODE_ID_VCN0_RAM: 2751 *type = GFX_FW_TYPE_VCN0_RAM; 2752 break; 2753 case AMDGPU_UCODE_ID_VCN1_RAM: 2754 *type = GFX_FW_TYPE_VCN1_RAM; 2755 break; 2756 case AMDGPU_UCODE_ID_DMCUB: 2757 *type = GFX_FW_TYPE_DMUB; 2758 break; 2759 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0: 2760 case AMDGPU_UCODE_ID_SDMA_RS64: 2761 *type = GFX_FW_TYPE_SDMA_UCODE_TH0; 2762 break; 2763 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1: 2764 *type = GFX_FW_TYPE_SDMA_UCODE_TH1; 2765 break; 2766 case AMDGPU_UCODE_ID_IMU_I: 2767 *type = GFX_FW_TYPE_IMU_I; 2768 break; 2769 case AMDGPU_UCODE_ID_IMU_D: 2770 *type = GFX_FW_TYPE_IMU_D; 2771 break; 2772 case AMDGPU_UCODE_ID_CP_RS64_PFP: 2773 *type = GFX_FW_TYPE_RS64_PFP; 2774 break; 2775 case AMDGPU_UCODE_ID_CP_RS64_ME: 2776 *type = GFX_FW_TYPE_RS64_ME; 2777 break; 2778 case AMDGPU_UCODE_ID_CP_RS64_MEC: 2779 *type = GFX_FW_TYPE_RS64_MEC; 2780 break; 2781 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK: 2782 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK; 2783 break; 2784 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK: 2785 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK; 2786 break; 2787 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK: 2788 *type = GFX_FW_TYPE_RS64_ME_P0_STACK; 2789 break; 2790 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK: 2791 *type = GFX_FW_TYPE_RS64_ME_P1_STACK; 2792 break; 2793 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK: 2794 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK; 2795 break; 2796 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK: 2797 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK; 2798 break; 2799 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK: 2800 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK; 2801 break; 2802 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK: 2803 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK; 2804 break; 2805 case AMDGPU_UCODE_ID_VPE_CTX: 2806 *type = GFX_FW_TYPE_VPEC_FW1; 2807 break; 2808 case AMDGPU_UCODE_ID_VPE_CTL: 2809 *type = GFX_FW_TYPE_VPEC_FW2; 2810 break; 2811 case AMDGPU_UCODE_ID_VPE: 2812 *type = GFX_FW_TYPE_VPE; 2813 break; 2814 case AMDGPU_UCODE_ID_UMSCH_MM_UCODE: 2815 *type = GFX_FW_TYPE_UMSCH_UCODE; 2816 break; 2817 case AMDGPU_UCODE_ID_UMSCH_MM_DATA: 2818 *type = GFX_FW_TYPE_UMSCH_DATA; 2819 break; 2820 case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER: 2821 *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER; 2822 break; 2823 case AMDGPU_UCODE_ID_P2S_TABLE: 2824 *type = GFX_FW_TYPE_P2S_TABLE; 2825 break; 2826 case AMDGPU_UCODE_ID_JPEG_RAM: 2827 *type = GFX_FW_TYPE_JPEG_RAM; 2828 break; 2829 case AMDGPU_UCODE_ID_ISP: 2830 *type = GFX_FW_TYPE_ISP; 2831 break; 2832 case AMDGPU_UCODE_ID_MAXIMUM: 2833 default: 2834 return -EINVAL; 2835 } 2836 2837 return 0; 2838 } 2839 2840 static void psp_print_fw_hdr(struct psp_context *psp, 2841 struct 
amdgpu_firmware_info *ucode) 2842 { 2843 struct amdgpu_device *adev = psp->adev; 2844 struct common_firmware_header *hdr; 2845 2846 switch (ucode->ucode_id) { 2847 case AMDGPU_UCODE_ID_SDMA0: 2848 case AMDGPU_UCODE_ID_SDMA1: 2849 case AMDGPU_UCODE_ID_SDMA2: 2850 case AMDGPU_UCODE_ID_SDMA3: 2851 case AMDGPU_UCODE_ID_SDMA4: 2852 case AMDGPU_UCODE_ID_SDMA5: 2853 case AMDGPU_UCODE_ID_SDMA6: 2854 case AMDGPU_UCODE_ID_SDMA7: 2855 hdr = (struct common_firmware_header *) 2856 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; 2857 amdgpu_ucode_print_sdma_hdr(hdr); 2858 break; 2859 case AMDGPU_UCODE_ID_CP_CE: 2860 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data; 2861 amdgpu_ucode_print_gfx_hdr(hdr); 2862 break; 2863 case AMDGPU_UCODE_ID_CP_PFP: 2864 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data; 2865 amdgpu_ucode_print_gfx_hdr(hdr); 2866 break; 2867 case AMDGPU_UCODE_ID_CP_ME: 2868 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data; 2869 amdgpu_ucode_print_gfx_hdr(hdr); 2870 break; 2871 case AMDGPU_UCODE_ID_CP_MEC1: 2872 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data; 2873 amdgpu_ucode_print_gfx_hdr(hdr); 2874 break; 2875 case AMDGPU_UCODE_ID_RLC_G: 2876 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data; 2877 amdgpu_ucode_print_rlc_hdr(hdr); 2878 break; 2879 case AMDGPU_UCODE_ID_SMC: 2880 hdr = (struct common_firmware_header *)adev->pm.fw->data; 2881 amdgpu_ucode_print_smc_hdr(hdr); 2882 break; 2883 default: 2884 break; 2885 } 2886 } 2887 2888 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp, 2889 struct amdgpu_firmware_info *ucode, 2890 struct psp_gfx_cmd_resp *cmd) 2891 { 2892 int ret; 2893 uint64_t fw_mem_mc_addr = ucode->mc_addr; 2894 2895 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 2896 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); 2897 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); 2898 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; 2899 2900 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); 2901 if (ret) 2902 dev_err(psp->adev->dev, "Unknown firmware type\n"); 2903 2904 return ret; 2905 } 2906 2907 int psp_execute_ip_fw_load(struct psp_context *psp, 2908 struct amdgpu_firmware_info *ucode) 2909 { 2910 int ret = 0; 2911 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 2912 2913 ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd); 2914 if (!ret) { 2915 ret = psp_cmd_submit_buf(psp, ucode, cmd, 2916 psp->fence_buf_mc_addr); 2917 } 2918 2919 release_psp_cmd_buf(psp); 2920 2921 return ret; 2922 } 2923 2924 static int psp_load_p2s_table(struct psp_context *psp) 2925 { 2926 int ret; 2927 struct amdgpu_device *adev = psp->adev; 2928 struct amdgpu_firmware_info *ucode = 2929 &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE]; 2930 2931 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || 2932 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO))) 2933 return 0; 2934 2935 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || 2936 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) { 2937 uint32_t supp_vers = adev->flags & AMD_IS_APU ? 
0x0036013D : 2938 0x0036003C; 2939 if (psp->sos.fw_version < supp_vers) 2940 return 0; 2941 } 2942 2943 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2944 return 0; 2945 2946 ret = psp_execute_ip_fw_load(psp, ucode); 2947 2948 return ret; 2949 } 2950 2951 static int psp_load_smu_fw(struct psp_context *psp) 2952 { 2953 int ret; 2954 struct amdgpu_device *adev = psp->adev; 2955 struct amdgpu_firmware_info *ucode = 2956 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 2957 struct amdgpu_ras *ras = psp->ras_context.ras; 2958 2959 /* 2960 * Skip SMU FW reloading in case of using BACO for runpm only, 2961 * as SMU is always alive. 2962 */ 2963 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || 2964 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO))) 2965 return 0; 2966 2967 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2968 return 0; 2969 2970 if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled && 2971 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) || 2972 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) { 2973 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); 2974 if (ret) 2975 dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n"); 2976 } 2977 2978 ret = psp_execute_ip_fw_load(psp, ucode); 2979 2980 if (ret) 2981 dev_err(adev->dev, "PSP load smu failed!\n"); 2982 2983 return ret; 2984 } 2985 2986 static bool fw_load_skip_check(struct psp_context *psp, 2987 struct amdgpu_firmware_info *ucode) 2988 { 2989 if (!ucode->fw || !ucode->ucode_size) 2990 return true; 2991 2992 if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE) 2993 return true; 2994 2995 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 2996 (psp_smu_reload_quirk(psp) || 2997 psp->autoload_supported || 2998 psp->pmfw_centralized_cstate_management)) 2999 return true; 3000 3001 if (amdgpu_sriov_vf(psp->adev) && 3002 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id)) 3003 return true; 3004 3005 if (psp->autoload_supported && 3006 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || 3007 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) 3008 /* skip mec JT when autoload is enabled */ 3009 return true; 3010 3011 return false; 3012 } 3013 3014 int psp_load_fw_list(struct psp_context *psp, 3015 struct amdgpu_firmware_info **ucode_list, int ucode_count) 3016 { 3017 int ret = 0, i; 3018 struct amdgpu_firmware_info *ucode; 3019 3020 for (i = 0; i < ucode_count; ++i) { 3021 ucode = ucode_list[i]; 3022 psp_print_fw_hdr(psp, ucode); 3023 ret = psp_execute_ip_fw_load(psp, ucode); 3024 if (ret) 3025 return ret; 3026 } 3027 return ret; 3028 } 3029 3030 static int psp_load_non_psp_fw(struct psp_context *psp) 3031 { 3032 int i, ret; 3033 struct amdgpu_firmware_info *ucode; 3034 struct amdgpu_device *adev = psp->adev; 3035 3036 if (psp->autoload_supported && 3037 !psp->pmfw_centralized_cstate_management) { 3038 ret = psp_load_smu_fw(psp); 3039 if (ret) 3040 return ret; 3041 } 3042 3043 /* Load P2S table first if it's available */ 3044 psp_load_p2s_table(psp); 3045 3046 for (i = 0; i < adev->firmware.max_ucodes; i++) { 3047 ucode = &adev->firmware.ucode[i]; 3048 3049 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 3050 !fw_load_skip_check(psp, ucode)) { 3051 ret = psp_load_smu_fw(psp); 3052 if (ret) 3053 return ret; 3054 continue; 3055 } 3056 3057 if (fw_load_skip_check(psp, ucode)) 3058 continue; 3059 3060 if (psp->autoload_supported && 3061 (amdgpu_ip_version(adev, MP0_HWIP, 0) == 3062 IP_VERSION(11, 0, 7) || 3063 amdgpu_ip_version(adev, MP0_HWIP, 0) == 3064 IP_VERSION(11, 0, 11) || 3065 
amdgpu_ip_version(adev, MP0_HWIP, 0) == 3066 IP_VERSION(11, 0, 12)) && 3067 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 || 3068 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 || 3069 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3)) 3070 /* PSP only receives one SDMA firmware for sienna_cichlid, 3071 * as all four SDMA firmware images are the same 3072 */ 3073 continue; 3074 3075 psp_print_fw_hdr(psp, ucode); 3076 3077 ret = psp_execute_ip_fw_load(psp, ucode); 3078 if (ret) 3079 return ret; 3080 3081 /* Start RLC autoload after the PSP has received all the gfx firmware */ 3082 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ? 3083 adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) { 3084 ret = psp_rlc_autoload_start(psp); 3085 if (ret) { 3086 dev_err(adev->dev, "Failed to start rlc autoload\n"); 3087 return ret; 3088 } 3089 } 3090 } 3091 3092 return 0; 3093 } 3094 3095 static int psp_load_fw(struct amdgpu_device *adev) 3096 { 3097 int ret; 3098 struct psp_context *psp = &adev->psp; 3099 3100 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { 3101 /* should not destroy ring, only stop */ 3102 psp_ring_stop(psp, PSP_RING_TYPE__KM); 3103 } else { 3104 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE); 3105 3106 ret = psp_ring_init(psp, PSP_RING_TYPE__KM); 3107 if (ret) { 3108 dev_err(adev->dev, "PSP ring init failed!\n"); 3109 goto failed; 3110 } 3111 } 3112 3113 ret = psp_hw_start(psp); 3114 if (ret) 3115 goto failed; 3116 3117 ret = psp_load_non_psp_fw(psp); 3118 if (ret) 3119 goto failed1; 3120 3121 ret = psp_asd_initialize(psp); 3122 if (ret) { 3123 dev_err(adev->dev, "PSP load asd failed!\n"); 3124 goto failed1; 3125 } 3126 3127 ret = psp_rl_load(adev); 3128 if (ret) { 3129 dev_err(adev->dev, "PSP load RL failed!\n"); 3130 goto failed1; 3131 } 3132 3133 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { 3134 if (adev->gmc.xgmi.num_physical_nodes > 1) { 3135 ret = psp_xgmi_initialize(psp, false, true); 3136 /* Warn on XGMI session initialization failure 3137 * instead of stopping driver initialization 3138 */ 3139 if (ret) 3140 dev_err(psp->adev->dev, 3141 "XGMI: Failed to initialize XGMI session\n"); 3142 } 3143 } 3144 3145 if (psp->ta_fw) { 3146 ret = psp_ras_initialize(psp); 3147 if (ret) 3148 dev_err(psp->adev->dev, 3149 "RAS: Failed to initialize RAS\n"); 3150 3151 ret = psp_hdcp_initialize(psp); 3152 if (ret) 3153 dev_err(psp->adev->dev, 3154 "HDCP: Failed to initialize HDCP\n"); 3155 3156 ret = psp_dtm_initialize(psp); 3157 if (ret) 3158 dev_err(psp->adev->dev, 3159 "DTM: Failed to initialize DTM\n"); 3160 3161 ret = psp_rap_initialize(psp); 3162 if (ret) 3163 dev_err(psp->adev->dev, 3164 "RAP: Failed to initialize RAP\n"); 3165 3166 ret = psp_securedisplay_initialize(psp); 3167 if (ret) 3168 dev_err(psp->adev->dev, 3169 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 3170 } 3171 3172 return 0; 3173 3174 failed1: 3175 psp_free_shared_bufs(psp); 3176 failed: 3177 /* 3178 * all cleanup jobs (xgmi terminate, ras terminate, 3179 * ring destroy, cmd/fence/fw buffers destroy, 3180 * psp->cmd destroy) are delayed to psp_hw_fini 3181 */ 3182 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 3183 return ret; 3184 } 3185 3186 static int psp_hw_init(struct amdgpu_ip_block *ip_block) 3187 { 3188 int ret; 3189 struct amdgpu_device *adev = ip_block->adev; 3190 3191 mutex_lock(&adev->firmware.mutex); 3192 3193 ret = amdgpu_ucode_init_bo(adev); 3194 if (ret) 3195 goto failed; 3196 3197 ret = psp_load_fw(adev); 3198 if (ret) { 3199 dev_err(adev->dev, "PSP firmware loading failed\n"); 3200 goto failed; 3201 } 3202
3203 mutex_unlock(&adev->firmware.mutex); 3204 return 0; 3205 3206 failed: 3207 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT; 3208 mutex_unlock(&adev->firmware.mutex); 3209 return -EINVAL; 3210 } 3211 3212 static int psp_hw_fini(struct amdgpu_ip_block *ip_block) 3213 { 3214 struct amdgpu_device *adev = ip_block->adev; 3215 struct psp_context *psp = &adev->psp; 3216 3217 if (psp->ta_fw) { 3218 psp_ras_terminate(psp); 3219 psp_securedisplay_terminate(psp); 3220 psp_rap_terminate(psp); 3221 psp_dtm_terminate(psp); 3222 psp_hdcp_terminate(psp); 3223 3224 if (adev->gmc.xgmi.num_physical_nodes > 1) 3225 psp_xgmi_terminate(psp); 3226 } 3227 3228 psp_asd_terminate(psp); 3229 psp_tmr_terminate(psp); 3230 3231 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 3232 3233 return 0; 3234 } 3235 3236 static int psp_suspend(struct amdgpu_ip_block *ip_block) 3237 { 3238 int ret = 0; 3239 struct amdgpu_device *adev = ip_block->adev; 3240 struct psp_context *psp = &adev->psp; 3241 3242 if (adev->gmc.xgmi.num_physical_nodes > 1 && 3243 psp->xgmi_context.context.initialized) { 3244 ret = psp_xgmi_terminate(psp); 3245 if (ret) { 3246 dev_err(adev->dev, "Failed to terminate xgmi ta\n"); 3247 goto out; 3248 } 3249 } 3250 3251 if (psp->ta_fw) { 3252 ret = psp_ras_terminate(psp); 3253 if (ret) { 3254 dev_err(adev->dev, "Failed to terminate ras ta\n"); 3255 goto out; 3256 } 3257 ret = psp_hdcp_terminate(psp); 3258 if (ret) { 3259 dev_err(adev->dev, "Failed to terminate hdcp ta\n"); 3260 goto out; 3261 } 3262 ret = psp_dtm_terminate(psp); 3263 if (ret) { 3264 dev_err(adev->dev, "Failed to terminate dtm ta\n"); 3265 goto out; 3266 } 3267 ret = psp_rap_terminate(psp); 3268 if (ret) { 3269 dev_err(adev->dev, "Failed to terminate rap ta\n"); 3270 goto out; 3271 } 3272 ret = psp_securedisplay_terminate(psp); 3273 if (ret) { 3274 dev_err(adev->dev, "Failed to terminate securedisplay ta\n"); 3275 goto out; 3276 } 3277 } 3278 3279 ret = psp_asd_terminate(psp); 3280 if (ret) { 3281 dev_err(adev->dev, "Failed to terminate asd\n"); 3282 goto out; 3283 } 3284 3285 ret = psp_tmr_terminate(psp); 3286 if (ret) { 3287 dev_err(adev->dev, "Failed to terminate tmr\n"); 3288 goto out; 3289 } 3290 3291 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); 3292 if (ret) 3293 dev_err(adev->dev, "PSP ring stop failed\n"); 3294 3295 out: 3296 return ret; 3297 } 3298 3299 static int psp_resume(struct amdgpu_ip_block *ip_block) 3300 { 3301 int ret; 3302 struct amdgpu_device *adev = ip_block->adev; 3303 struct psp_context *psp = &adev->psp; 3304 3305 dev_info(adev->dev, "PSP is resuming...\n"); 3306 3307 if (psp->mem_train_ctx.enable_mem_training) { 3308 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); 3309 if (ret) { 3310 dev_err(adev->dev, "Failed to process memory training!\n"); 3311 return ret; 3312 } 3313 } 3314 3315 mutex_lock(&adev->firmware.mutex); 3316 3317 ret = amdgpu_ucode_init_bo(adev); 3318 if (ret) 3319 goto failed; 3320 3321 ret = psp_hw_start(psp); 3322 if (ret) 3323 goto failed; 3324 3325 ret = psp_load_non_psp_fw(psp); 3326 if (ret) 3327 goto failed; 3328 3329 ret = psp_asd_initialize(psp); 3330 if (ret) { 3331 dev_err(adev->dev, "PSP load asd failed!\n"); 3332 goto failed; 3333 } 3334 3335 ret = psp_rl_load(adev); 3336 if (ret) { 3337 dev_err(adev->dev, "PSP load RL failed!\n"); 3338 goto failed; 3339 } 3340 3341 if (adev->gmc.xgmi.num_physical_nodes > 1) { 3342 ret = psp_xgmi_initialize(psp, false, true); 3343 /* Warn on XGMI session initialization failure 3344 * instead of stopping driver initialization 3345 */ 3346 if (ret) 3347
dev_err(psp->adev->dev, 3348 "XGMI: Failed to initialize XGMI session\n"); 3349 } 3350 3351 if (psp->ta_fw) { 3352 ret = psp_ras_initialize(psp); 3353 if (ret) 3354 dev_err(psp->adev->dev, 3355 "RAS: Failed to initialize RAS\n"); 3356 3357 ret = psp_hdcp_initialize(psp); 3358 if (ret) 3359 dev_err(psp->adev->dev, 3360 "HDCP: Failed to initialize HDCP\n"); 3361 3362 ret = psp_dtm_initialize(psp); 3363 if (ret) 3364 dev_err(psp->adev->dev, 3365 "DTM: Failed to initialize DTM\n"); 3366 3367 ret = psp_rap_initialize(psp); 3368 if (ret) 3369 dev_err(psp->adev->dev, 3370 "RAP: Failed to initialize RAP\n"); 3371 3372 ret = psp_securedisplay_initialize(psp); 3373 if (ret) 3374 dev_err(psp->adev->dev, 3375 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 3376 } 3377 3378 mutex_unlock(&adev->firmware.mutex); 3379 3380 return 0; 3381 3382 failed: 3383 dev_err(adev->dev, "PSP resume failed\n"); 3384 mutex_unlock(&adev->firmware.mutex); 3385 return ret; 3386 } 3387 3388 int psp_gpu_reset(struct amdgpu_device *adev) 3389 { 3390 int ret; 3391 3392 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 3393 return 0; 3394 3395 mutex_lock(&adev->psp.mutex); 3396 ret = psp_mode1_reset(&adev->psp); 3397 mutex_unlock(&adev->psp.mutex); 3398 3399 return ret; 3400 } 3401 3402 int psp_rlc_autoload_start(struct psp_context *psp) 3403 { 3404 int ret; 3405 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 3406 3407 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC; 3408 3409 ret = psp_cmd_submit_buf(psp, NULL, cmd, 3410 psp->fence_buf_mc_addr); 3411 3412 release_psp_cmd_buf(psp); 3413 3414 return ret; 3415 } 3416 3417 int psp_ring_cmd_submit(struct psp_context *psp, 3418 uint64_t cmd_buf_mc_addr, 3419 uint64_t fence_mc_addr, 3420 int index) 3421 { 3422 unsigned int psp_write_ptr_reg = 0; 3423 struct psp_gfx_rb_frame *write_frame; 3424 struct psp_ring *ring = &psp->km_ring; 3425 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; 3426 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + 3427 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; 3428 struct amdgpu_device *adev = psp->adev; 3429 uint32_t ring_size_dw = ring->ring_size / 4; 3430 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; 3431 3432 /* KM (GPCOM) prepare write pointer */ 3433 psp_write_ptr_reg = psp_ring_get_wptr(psp); 3434 3435 /* Update KM RB frame pointer to new frame */ 3436 /* write_frame ptr increments by size of rb_frame in bytes */ 3437 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ 3438 if ((psp_write_ptr_reg % ring_size_dw) == 0) 3439 write_frame = ring_buffer_start; 3440 else 3441 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); 3442 /* Check invalid write_frame ptr address */ 3443 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { 3444 dev_err(adev->dev, 3445 "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", 3446 ring_buffer_start, ring_buffer_end, write_frame); 3447 dev_err(adev->dev, 3448 "write_frame is pointing to address out of bounds\n"); 3449 return -EINVAL; 3450 } 3451 3452 /* Initialize KM RB frame */ 3453 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); 3454 3455 /* Update KM RB frame */ 3456 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); 3457 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); 3458 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); 3459 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); 3460 write_frame->fence_value = index; 3461 
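/* Descriptive note (added): the HDP flush below is intended to make the CPU-written RB frame visible to the PSP before the write pointer is advanced. */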
amdgpu_device_flush_hdp(adev, NULL); 3462 3463 /* Update the write Pointer in DWORDs */ 3464 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; 3465 psp_ring_set_wptr(psp, psp_write_ptr_reg); 3466 return 0; 3467 } 3468 3469 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name) 3470 { 3471 struct amdgpu_device *adev = psp->adev; 3472 const struct psp_firmware_header_v1_0 *asd_hdr; 3473 int err = 0; 3474 3475 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED, 3476 "amdgpu/%s_asd.bin", chip_name); 3477 if (err) 3478 goto out; 3479 3480 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; 3481 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version); 3482 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version); 3483 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes); 3484 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr + 3485 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); 3486 return 0; 3487 out: 3488 amdgpu_ucode_release(&adev->psp.asd_fw); 3489 return err; 3490 } 3491 3492 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name) 3493 { 3494 struct amdgpu_device *adev = psp->adev; 3495 const struct psp_firmware_header_v1_0 *toc_hdr; 3496 int err = 0; 3497 3498 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED, 3499 "amdgpu/%s_toc.bin", chip_name); 3500 if (err) 3501 goto out; 3502 3503 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; 3504 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); 3505 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); 3506 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); 3507 adev->psp.toc.start_addr = (uint8_t *)toc_hdr + 3508 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); 3509 return 0; 3510 out: 3511 amdgpu_ucode_release(&adev->psp.toc_fw); 3512 return err; 3513 } 3514 3515 static int parse_sos_bin_descriptor(struct psp_context *psp, 3516 const struct psp_fw_bin_desc *desc, 3517 const struct psp_firmware_header_v2_0 *sos_hdr) 3518 { 3519 uint8_t *ucode_start_addr = NULL; 3520 3521 if (!psp || !desc || !sos_hdr) 3522 return -EINVAL; 3523 3524 ucode_start_addr = (uint8_t *)sos_hdr + 3525 le32_to_cpu(desc->offset_bytes) + 3526 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3527 3528 switch (desc->fw_type) { 3529 case PSP_FW_TYPE_PSP_SOS: 3530 psp->sos.fw_version = le32_to_cpu(desc->fw_version); 3531 psp->sos.feature_version = le32_to_cpu(desc->fw_version); 3532 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes); 3533 psp->sos.start_addr = ucode_start_addr; 3534 break; 3535 case PSP_FW_TYPE_PSP_SYS_DRV: 3536 psp->sys.fw_version = le32_to_cpu(desc->fw_version); 3537 psp->sys.feature_version = le32_to_cpu(desc->fw_version); 3538 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes); 3539 psp->sys.start_addr = ucode_start_addr; 3540 break; 3541 case PSP_FW_TYPE_PSP_KDB: 3542 psp->kdb.fw_version = le32_to_cpu(desc->fw_version); 3543 psp->kdb.feature_version = le32_to_cpu(desc->fw_version); 3544 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes); 3545 psp->kdb.start_addr = ucode_start_addr; 3546 break; 3547 case PSP_FW_TYPE_PSP_TOC: 3548 psp->toc.fw_version = le32_to_cpu(desc->fw_version); 3549 psp->toc.feature_version = le32_to_cpu(desc->fw_version); 3550 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes); 3551 
psp->toc.start_addr = ucode_start_addr; 3552 break; 3553 case PSP_FW_TYPE_PSP_SPL: 3554 psp->spl.fw_version = le32_to_cpu(desc->fw_version); 3555 psp->spl.feature_version = le32_to_cpu(desc->fw_version); 3556 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes); 3557 psp->spl.start_addr = ucode_start_addr; 3558 break; 3559 case PSP_FW_TYPE_PSP_RL: 3560 psp->rl.fw_version = le32_to_cpu(desc->fw_version); 3561 psp->rl.feature_version = le32_to_cpu(desc->fw_version); 3562 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes); 3563 psp->rl.start_addr = ucode_start_addr; 3564 break; 3565 case PSP_FW_TYPE_PSP_SOC_DRV: 3566 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version); 3567 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version); 3568 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3569 psp->soc_drv.start_addr = ucode_start_addr; 3570 break; 3571 case PSP_FW_TYPE_PSP_INTF_DRV: 3572 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version); 3573 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version); 3574 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3575 psp->intf_drv.start_addr = ucode_start_addr; 3576 break; 3577 case PSP_FW_TYPE_PSP_DBG_DRV: 3578 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version); 3579 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version); 3580 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3581 psp->dbg_drv.start_addr = ucode_start_addr; 3582 break; 3583 case PSP_FW_TYPE_PSP_RAS_DRV: 3584 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version); 3585 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version); 3586 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3587 psp->ras_drv.start_addr = ucode_start_addr; 3588 break; 3589 case PSP_FW_TYPE_PSP_IPKEYMGR_DRV: 3590 psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version); 3591 psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version); 3592 psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3593 psp->ipkeymgr_drv.start_addr = ucode_start_addr; 3594 break; 3595 case PSP_FW_TYPE_PSP_SPDM_DRV: 3596 psp->spdm_drv.fw_version = le32_to_cpu(desc->fw_version); 3597 psp->spdm_drv.feature_version = le32_to_cpu(desc->fw_version); 3598 psp->spdm_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3599 psp->spdm_drv.start_addr = ucode_start_addr; 3600 break; 3601 default: 3602 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type); 3603 break; 3604 } 3605 3606 return 0; 3607 } 3608 3609 static int psp_init_sos_base_fw(struct amdgpu_device *adev) 3610 { 3611 const struct psp_firmware_header_v1_0 *sos_hdr; 3612 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3613 uint8_t *ucode_array_start_addr; 3614 3615 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3616 ucode_array_start_addr = (uint8_t *)sos_hdr + 3617 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3618 3619 if (adev->gmc.xgmi.connected_to_cpu || 3620 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) { 3621 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version); 3622 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version); 3623 3624 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes); 3625 adev->psp.sys.start_addr = ucode_array_start_addr; 3626 3627 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes); 3628 adev->psp.sos.start_addr = ucode_array_start_addr + 3629 le32_to_cpu(sos_hdr->sos.offset_bytes); 3630 } else { 3631 /* Load alternate PSP SOS FW */ 3632 sos_hdr_v1_3 = 
(const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3633 3634 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3635 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3636 3637 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes); 3638 adev->psp.sys.start_addr = ucode_array_start_addr + 3639 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes); 3640 3641 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes); 3642 adev->psp.sos.start_addr = ucode_array_start_addr + 3643 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes); 3644 } 3645 3646 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) { 3647 dev_warn(adev->dev, "PSP SOS FW not available"); 3648 return -EINVAL; 3649 } 3650 3651 return 0; 3652 } 3653 3654 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) 3655 { 3656 struct amdgpu_device *adev = psp->adev; 3657 const struct psp_firmware_header_v1_0 *sos_hdr; 3658 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1; 3659 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; 3660 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3661 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0; 3662 const struct psp_firmware_header_v2_1 *sos_hdr_v2_1; 3663 int fw_index, fw_bin_count, start_index = 0; 3664 const struct psp_fw_bin_desc *fw_bin; 3665 uint8_t *ucode_array_start_addr; 3666 int err = 0; 3667 3668 if (amdgpu_is_kicker_fw(adev)) 3669 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED, 3670 "amdgpu/%s_sos_kicker.bin", chip_name); 3671 else 3672 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED, 3673 "amdgpu/%s_sos.bin", chip_name); 3674 if (err) 3675 goto out; 3676 3677 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3678 ucode_array_start_addr = (uint8_t *)sos_hdr + 3679 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3680 amdgpu_ucode_print_psp_hdr(&sos_hdr->header); 3681 3682 switch (sos_hdr->header.header_version_major) { 3683 case 1: 3684 err = psp_init_sos_base_fw(adev); 3685 if (err) 3686 goto out; 3687 3688 if (sos_hdr->header.header_version_minor == 1) { 3689 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; 3690 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes); 3691 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3692 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes); 3693 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes); 3694 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3695 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes); 3696 } 3697 if (sos_hdr->header.header_version_minor == 2) { 3698 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; 3699 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes); 3700 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3701 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes); 3702 } 3703 if (sos_hdr->header.header_version_minor == 3) { 3704 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3705 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes); 3706 adev->psp.toc.start_addr = ucode_array_start_addr + 3707 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes); 3708 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes); 3709 adev->psp.kdb.start_addr = ucode_array_start_addr + 3710 
le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes); 3711 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes); 3712 adev->psp.spl.start_addr = ucode_array_start_addr + 3713 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes); 3714 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes); 3715 adev->psp.rl.start_addr = ucode_array_start_addr + 3716 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes); 3717 } 3718 break; 3719 case 2: 3720 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data; 3721 3722 fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); 3723 3724 if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) { 3725 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n"); 3726 err = -EINVAL; 3727 goto out; 3728 } 3729 3730 if (sos_hdr_v2_0->header.header_version_minor == 1) { 3731 sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data; 3732 3733 fw_bin = sos_hdr_v2_1->psp_fw_bin; 3734 3735 if (psp_is_aux_sos_load_required(psp)) 3736 start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index); 3737 else 3738 fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index); 3739 3740 } else { 3741 fw_bin = sos_hdr_v2_0->psp_fw_bin; 3742 } 3743 3744 for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) { 3745 err = parse_sos_bin_descriptor(psp, fw_bin + fw_index, 3746 sos_hdr_v2_0); 3747 if (err) 3748 goto out; 3749 } 3750 break; 3751 default: 3752 dev_err(adev->dev, 3753 "unsupported psp sos firmware\n"); 3754 err = -EINVAL; 3755 goto out; 3756 } 3757 3758 return 0; 3759 out: 3760 amdgpu_ucode_release(&adev->psp.sos_fw); 3761 3762 return err; 3763 } 3764 3765 static bool is_ta_fw_applicable(struct psp_context *psp, 3766 const struct psp_fw_bin_desc *desc) 3767 { 3768 struct amdgpu_device *adev = psp->adev; 3769 uint32_t fw_version; 3770 3771 switch (desc->fw_type) { 3772 case TA_FW_TYPE_PSP_XGMI: 3773 case TA_FW_TYPE_PSP_XGMI_AUX: 3774 /* for now, AUX TA only exists on 13.0.6 ta bin, 3775 * from v20.00.0x.14 3776 */ 3777 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == 3778 IP_VERSION(13, 0, 6)) { 3779 fw_version = le32_to_cpu(desc->fw_version); 3780 3781 if (adev->flags & AMD_IS_APU && 3782 (fw_version & 0xff) >= 0x14) 3783 return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX; 3784 else 3785 return desc->fw_type == TA_FW_TYPE_PSP_XGMI; 3786 } 3787 break; 3788 default: 3789 break; 3790 } 3791 3792 return true; 3793 } 3794 3795 static int parse_ta_bin_descriptor(struct psp_context *psp, 3796 const struct psp_fw_bin_desc *desc, 3797 const struct ta_firmware_header_v2_0 *ta_hdr) 3798 { 3799 uint8_t *ucode_start_addr = NULL; 3800 3801 if (!psp || !desc || !ta_hdr) 3802 return -EINVAL; 3803 3804 if (!is_ta_fw_applicable(psp, desc)) 3805 return 0; 3806 3807 ucode_start_addr = (uint8_t *)ta_hdr + 3808 le32_to_cpu(desc->offset_bytes) + 3809 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3810 3811 switch (desc->fw_type) { 3812 case TA_FW_TYPE_PSP_ASD: 3813 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3814 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version); 3815 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3816 psp->asd_context.bin_desc.start_addr = ucode_start_addr; 3817 break; 3818 case TA_FW_TYPE_PSP_XGMI: 3819 case TA_FW_TYPE_PSP_XGMI_AUX: 3820 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3821 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3822 
static int parse_ta_bin_descriptor(struct psp_context *psp,
				   const struct psp_fw_bin_desc *desc,
				   const struct ta_firmware_header_v2_0 *ta_hdr)
{
	uint8_t *ucode_start_addr = NULL;

	if (!psp || !desc || !ta_hdr)
		return -EINVAL;

	if (!is_ta_fw_applicable(psp, desc))
		return 0;

	ucode_start_addr = (uint8_t *)ta_hdr +
		le32_to_cpu(desc->offset_bytes) +
		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);

	switch (desc->fw_type) {
	case TA_FW_TYPE_PSP_ASD:
		psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
		psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
		psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
		psp->asd_context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_XGMI:
	case TA_FW_TYPE_PSP_XGMI_AUX:
		psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
		psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
		psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_RAS:
		psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
		psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
		psp->ras_context.context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_HDCP:
		psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
		psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
		psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_DTM:
		psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
		psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
		psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_RAP:
		psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
		psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
		psp->rap_context.context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_SECUREDISPLAY:
		psp->securedisplay_context.context.bin_desc.fw_version =
			le32_to_cpu(desc->fw_version);
		psp->securedisplay_context.context.bin_desc.size_bytes =
			le32_to_cpu(desc->size_bytes);
		psp->securedisplay_context.context.bin_desc.start_addr =
			ucode_start_addr;
		break;
	default:
		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
		break;
	}

	return 0;
}

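/*
 * v1 TA packages carry a fixed set of images (XGMI, RAS, HDCP, DTM and
 * SECUREDISPLAY).  Note that the RAS, DTM and SECUREDISPLAY offsets are
 * applied relative to the start addresses computed for the preceding
 * images rather than to the header itself.
 */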
static int parse_ta_v1_microcode(struct psp_context *psp)
{
	const struct ta_firmware_header_v1_0 *ta_hdr;
	struct amdgpu_device *adev = psp->adev;

	ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;

	if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
		return -EINVAL;

	adev->psp.xgmi_context.context.bin_desc.fw_version =
		le32_to_cpu(ta_hdr->xgmi.fw_version);
	adev->psp.xgmi_context.context.bin_desc.size_bytes =
		le32_to_cpu(ta_hdr->xgmi.size_bytes);
	adev->psp.xgmi_context.context.bin_desc.start_addr =
		(uint8_t *)ta_hdr +
		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);

	adev->psp.ras_context.context.bin_desc.fw_version =
		le32_to_cpu(ta_hdr->ras.fw_version);
	adev->psp.ras_context.context.bin_desc.size_bytes =
		le32_to_cpu(ta_hdr->ras.size_bytes);
	adev->psp.ras_context.context.bin_desc.start_addr =
		(uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
		le32_to_cpu(ta_hdr->ras.offset_bytes);

	adev->psp.hdcp_context.context.bin_desc.fw_version =
		le32_to_cpu(ta_hdr->hdcp.fw_version);
	adev->psp.hdcp_context.context.bin_desc.size_bytes =
		le32_to_cpu(ta_hdr->hdcp.size_bytes);
	adev->psp.hdcp_context.context.bin_desc.start_addr =
		(uint8_t *)ta_hdr +
		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);

	adev->psp.dtm_context.context.bin_desc.fw_version =
		le32_to_cpu(ta_hdr->dtm.fw_version);
	adev->psp.dtm_context.context.bin_desc.size_bytes =
		le32_to_cpu(ta_hdr->dtm.size_bytes);
	adev->psp.dtm_context.context.bin_desc.start_addr =
		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
		le32_to_cpu(ta_hdr->dtm.offset_bytes);

	adev->psp.securedisplay_context.context.bin_desc.fw_version =
		le32_to_cpu(ta_hdr->securedisplay.fw_version);
	adev->psp.securedisplay_context.context.bin_desc.size_bytes =
		le32_to_cpu(ta_hdr->securedisplay.size_bytes);
	adev->psp.securedisplay_context.context.bin_desc.start_addr =
		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
		le32_to_cpu(ta_hdr->securedisplay.offset_bytes);

	adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);

	return 0;
}

static int parse_ta_v2_microcode(struct psp_context *psp)
{
	const struct ta_firmware_header_v2_0 *ta_hdr;
	struct amdgpu_device *adev = psp->adev;
	int err = 0;
	int ta_index = 0;

	ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;

	if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
		return -EINVAL;

	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
		return -EINVAL;
	}

	for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
		err = parse_ta_bin_descriptor(psp,
					      &ta_hdr->ta_fw_bin[ta_index],
					      ta_hdr);
		if (err)
			return err;
	}

	return 0;
}

int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
{
	const struct common_firmware_header *hdr;
	struct amdgpu_device *adev = psp->adev;
	int err;

	if (amdgpu_is_kicker_fw(adev))
		err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s_ta_kicker.bin", chip_name);
	else
		err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s_ta.bin", chip_name);
	if (err)
		return err;

	hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
	switch (le16_to_cpu(hdr->header_version_major)) {
	case 1:
		err = parse_ta_v1_microcode(psp);
		break;
	case 2:
		err = parse_ta_v2_microcode(psp);
		break;
	default:
		dev_err(adev->dev, "unsupported TA header version\n");
		err = -EINVAL;
	}

	if (err)
		amdgpu_ucode_release(&adev->psp.ta_fw);

	return err;
}

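/*
 * CAP microcode is only consumed by SR-IOV virtual functions.  A missing
 * <chip>_cap.bin is not fatal: the request is optional and -ENODEV is
 * treated as "not present" and skipped with a warning.
 */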
int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
{
	struct amdgpu_device *adev = psp->adev;
	const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
	struct amdgpu_firmware_info *info = NULL;
	int err = 0;

	if (!amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
		return -EINVAL;
	}

	err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL,
				   "amdgpu/%s_cap.bin", chip_name);
	if (err) {
		if (err == -ENODEV) {
			dev_warn(adev->dev, "cap microcode does not exist, skip\n");
			err = 0;
		} else {
			dev_err(adev->dev, "fail to initialize cap microcode\n");
		}
		goto out;
	}

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
	info->ucode_id = AMDGPU_UCODE_ID_CAP;
	info->fw = adev->psp.cap_fw;
	cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
		adev->psp.cap_fw->data;
	adev->firmware.fw_size += ALIGN(
		le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
	adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
	adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
	adev->psp.cap_ucode_size =
		le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);

	return 0;

out:
	amdgpu_ucode_release(&adev->psp.cap_fw);
	return err;
}

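/*
 * Configure the SQ perfmon override controls for one XCP.  The command is
 * only honoured on MP0 13.0.6 bare-metal parts; SR-IOV guests return
 * success without touching the hardware.
 */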
int psp_config_sq_perfmon(struct psp_context *psp,
			  uint32_t xcp_id, bool core_override_enable,
			  bool reg_override_enable, bool perfmon_override_enable)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (xcp_id > MAX_XCP) {
		dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id);
		return -EINVAL;
	}

	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) {
		dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n",
			amdgpu_ip_version(psp->adev, MP0_HWIP, 0));
		return -EINVAL;
	}

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON;
	cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id);
	cmd->cmd.config_sq_perfmon.core_override = core_override_enable;
	cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable;
	cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (ret)
		dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n",
			 xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable);

	release_psp_cmd_buf(psp);
	return ret;
}

static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block,
				     enum amd_powergating_state state)
{
	return 0;
}

static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_ip_block *ip_block;
	uint32_t fw_ver;
	int ret;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
	if (!ip_block || !ip_block->status.late_initialized) {
		dev_info(adev->dev, "PSP block is not ready yet.\n");
		return -EBUSY;
	}

	mutex_lock(&adev->psp.mutex);
	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
	mutex_unlock(&adev->psp.mutex);

	if (ret) {
		dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
		return ret;
	}

	return sysfs_emit(buf, "%x\n", fw_ver);
}

static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret, idx;
	const struct firmware *usbc_pd_fw;
	struct amdgpu_bo *fw_buf_bo = NULL;
	uint64_t fw_pri_mc_addr;
	void *fw_pri_cpu_addr;
	struct amdgpu_ip_block *ip_block;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
	if (!ip_block || !ip_block->status.late_initialized) {
		dev_err(adev->dev, "PSP block is not ready yet.");
		return -EBUSY;
	}

	if (!drm_dev_enter(ddev, &idx))
		return -ENODEV;

	ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s", buf);
	if (ret)
		goto fail;

	/* LFB address which is aligned to 1MB boundary per PSP request */
	ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &fw_buf_bo, &fw_pri_mc_addr,
				      &fw_pri_cpu_addr);
	if (ret)
		goto rel_buf;

	memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);

	mutex_lock(&adev->psp.mutex);
	ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
	mutex_unlock(&adev->psp.mutex);

	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);

rel_buf:
	amdgpu_ucode_release(&usbc_pd_fw);
fail:
	if (ret) {
		dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret);
		count = ret;
	}

	drm_dev_exit(idx);
	return count;
}

void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
{
	int idx;

	if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
		return;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, start_addr, bin_size);

	drm_dev_exit(idx);
}

/**
 * DOC: usbc_pd_fw
 * Reading from this file will retrieve the USB-C PD firmware version. Writing to
 * this file will trigger the update process.
 */
static DEVICE_ATTR(usbc_pd_fw, 0644,
		   psp_usbc_pd_fw_sysfs_read,
		   psp_usbc_pd_fw_sysfs_write);
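/*
 * Example usage (illustrative only; the attribute is exposed on the GPU's
 * device node and is only visible when sup_pd_fw_up is set, and the card
 * index depends on the system):
 *
 *   cat /sys/class/drm/card0/device/usbc_pd_fw
 *   printf "usbc_pd.bin" > /sys/class/drm/card0/device/usbc_pd_fw
 *
 * The written string is a file name resolved as "amdgpu/<name>" in the
 * firmware search path; "usbc_pd.bin" above is a placeholder.
 */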

int is_psp_fw_valid(struct psp_bin_desc bin)
{
	return bin.size_bytes;
}

static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
					const struct bin_attribute *bin_attr,
					char *buffer, loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	adev->psp.vbflash_done = false;

	/* Safeguard against memory drain */
	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
		dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
		kvfree(adev->psp.vbflash_tmp_buf);
		adev->psp.vbflash_tmp_buf = NULL;
		adev->psp.vbflash_image_size = 0;
		return -ENOMEM;
	}

	/* TODO Just allocate max for now and optimize to realloc later if needed */
	if (!adev->psp.vbflash_tmp_buf) {
		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
		if (!adev->psp.vbflash_tmp_buf)
			return -ENOMEM;
	}

	mutex_lock(&adev->psp.mutex);
	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
	adev->psp.vbflash_image_size += count;
	mutex_unlock(&adev->psp.mutex);

	dev_dbg(adev->dev, "IFWI staged for update\n");

	return count;
}

static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
				       const struct bin_attribute *bin_attr, char *buffer,
				       loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_bo *fw_buf_bo = NULL;
	uint64_t fw_pri_mc_addr;
	void *fw_pri_cpu_addr;
	int ret;

	if (adev->psp.vbflash_image_size == 0)
		return -EINVAL;

	dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");

	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &fw_buf_bo,
				      &fw_pri_mc_addr,
				      &fw_pri_cpu_addr);
	if (ret)
		goto rel_buf;

	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);

	mutex_lock(&adev->psp.mutex);
	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
	mutex_unlock(&adev->psp.mutex);

	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);

rel_buf:
	kvfree(adev->psp.vbflash_tmp_buf);
	adev->psp.vbflash_tmp_buf = NULL;
	adev->psp.vbflash_image_size = 0;

	if (ret) {
		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
		return ret;
	}

	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
	return 0;
}

/**
 * DOC: psp_vbflash
 * Writing to this file will stage an IFWI for update. Reading from this file
 * will trigger the update process.
 */
static const struct bin_attribute psp_vbflash_bin_attr = {
	.attr = {.name = "psp_vbflash", .mode = 0660},
	.size = 0,
	.write = amdgpu_psp_vbflash_write,
	.read = amdgpu_psp_vbflash_read,
};
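/*
 * Typical flash sequence from user space (paths are illustrative; the
 * attributes live on the GPU's device node and are only exposed when
 * sup_ifwi_up is set):
 *
 *   cp ifwi_image.bin /sys/bus/pci/devices/<bdf>/psp_vbflash   # stage image
 *   cat /sys/bus/pci/devices/<bdf>/psp_vbflash > /dev/null     # start flash
 *   cat /sys/bus/pci/devices/<bdf>/psp_vbflash_status          # poll result
 */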

/**
 * DOC: psp_vbflash_status
 * The status of the flash process.
 * 0: IFWI flash not complete.
 * 1: IFWI flash complete.
 */
static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t vbflash_status;

	vbflash_status = psp_vbflash_status(&adev->psp);
	if (!adev->psp.vbflash_done)
		vbflash_status = 0;
	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
		vbflash_status = 1;

	return sysfs_emit(buf, "0x%x\n", vbflash_status);
}
static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);

static const struct bin_attribute *const bin_flash_attrs[] = {
	&psp_vbflash_bin_attr,
	NULL
};

static struct attribute *flash_attrs[] = {
	&dev_attr_psp_vbflash_status.attr,
	&dev_attr_usbc_pd_fw.attr,
	NULL
};

static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (attr == &dev_attr_usbc_pd_fw.attr)
		return adev->psp.sup_pd_fw_up ? 0660 : 0;

	return adev->psp.sup_ifwi_up ? 0440 : 0;
}

static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
						const struct bin_attribute *attr,
						int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return adev->psp.sup_ifwi_up ? 0660 : 0;
}

const struct attribute_group amdgpu_flash_attr_group = {
	.attrs = flash_attrs,
	.bin_attrs = bin_flash_attrs,
	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
	.is_visible = amdgpu_flash_attr_is_visible,
};

#if defined(CONFIG_DEBUG_FS)
static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct spirom_bo *bo_triplet;
	int ret;

	/* serialize the open() file calling */
	if (!mutex_trylock(&adev->psp.mutex))
		return -EBUSY;

	/*
	 * Make sure only one userspace process at a time performs a dump, so
	 * that at most one buffer of AMD_VBIOS_FILE_MAX_SIZE_B * 2 is
	 * consumed, e.g. when one process tries to open the file while
	 * another has already proceeded to read() or release().  This also
	 * lets the read() and release() callbacks run without taking the
	 * mutex themselves.
	 */
	if (adev->psp.spirom_dump_trip) {
		mutex_unlock(&adev->psp.mutex);
		return -EBUSY;
	}

	bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
	if (!bo_triplet) {
		mutex_unlock(&adev->psp.mutex);
		return -ENOMEM;
	}

	ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &bo_triplet->bo,
				      &bo_triplet->mc_addr,
				      &bo_triplet->cpu_addr);
	if (ret)
		goto rel_trip;

	ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
	if (ret)
		goto rel_bo;

	adev->psp.spirom_dump_trip = bo_triplet;
	mutex_unlock(&adev->psp.mutex);
	return 0;
rel_bo:
	amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
			      &bo_triplet->cpu_addr);
rel_trip:
	kfree(bo_triplet);
	mutex_unlock(&adev->psp.mutex);
	dev_err(adev->dev, "IFWI dump failed, err = %d\n", ret);
	return ret;
}

static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
					    loff_t *pos)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;

	if (!bo_triplet)
		return -EINVAL;

	return simple_read_from_buffer(buf,
				       size,
				       pos, bo_triplet->cpu_addr,
				       AMD_VBIOS_FILE_MAX_SIZE_B * 2);
}

static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;

	if (bo_triplet) {
		amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
				      &bo_triplet->cpu_addr);
		kfree(bo_triplet);
	}

	adev->psp.spirom_dump_trip = NULL;
	return 0;
}

static const struct file_operations psp_dump_spirom_debugfs_ops = {
	.owner = THIS_MODULE,
	.open = psp_read_spirom_debugfs_open,
	.read = psp_read_spirom_debugfs_read,
	.release = psp_read_spirom_debugfs_release,
	.llseek = default_llseek,
};
#endif

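/*
 * Expose the SPI ROM dump through debugfs.  Opening "psp_spirom_dump"
 * triggers the dump into a GTT buffer sized at twice
 * AMD_VBIOS_FILE_MAX_SIZE_B; the contents can then be read back, and the
 * buffer is released when the file is closed.
 */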
"psp", 4446 .early_init = psp_early_init, 4447 .sw_init = psp_sw_init, 4448 .sw_fini = psp_sw_fini, 4449 .hw_init = psp_hw_init, 4450 .hw_fini = psp_hw_fini, 4451 .suspend = psp_suspend, 4452 .resume = psp_resume, 4453 .set_clockgating_state = psp_set_clockgating_state, 4454 .set_powergating_state = psp_set_powergating_state, 4455 }; 4456 4457 const struct amdgpu_ip_block_version psp_v3_1_ip_block = { 4458 .type = AMD_IP_BLOCK_TYPE_PSP, 4459 .major = 3, 4460 .minor = 1, 4461 .rev = 0, 4462 .funcs = &psp_ip_funcs, 4463 }; 4464 4465 const struct amdgpu_ip_block_version psp_v10_0_ip_block = { 4466 .type = AMD_IP_BLOCK_TYPE_PSP, 4467 .major = 10, 4468 .minor = 0, 4469 .rev = 0, 4470 .funcs = &psp_ip_funcs, 4471 }; 4472 4473 const struct amdgpu_ip_block_version psp_v11_0_ip_block = { 4474 .type = AMD_IP_BLOCK_TYPE_PSP, 4475 .major = 11, 4476 .minor = 0, 4477 .rev = 0, 4478 .funcs = &psp_ip_funcs, 4479 }; 4480 4481 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = { 4482 .type = AMD_IP_BLOCK_TYPE_PSP, 4483 .major = 11, 4484 .minor = 0, 4485 .rev = 8, 4486 .funcs = &psp_ip_funcs, 4487 }; 4488 4489 const struct amdgpu_ip_block_version psp_v12_0_ip_block = { 4490 .type = AMD_IP_BLOCK_TYPE_PSP, 4491 .major = 12, 4492 .minor = 0, 4493 .rev = 0, 4494 .funcs = &psp_ip_funcs, 4495 }; 4496 4497 const struct amdgpu_ip_block_version psp_v13_0_ip_block = { 4498 .type = AMD_IP_BLOCK_TYPE_PSP, 4499 .major = 13, 4500 .minor = 0, 4501 .rev = 0, 4502 .funcs = &psp_ip_funcs, 4503 }; 4504 4505 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = { 4506 .type = AMD_IP_BLOCK_TYPE_PSP, 4507 .major = 13, 4508 .minor = 0, 4509 .rev = 4, 4510 .funcs = &psp_ip_funcs, 4511 }; 4512 4513 const struct amdgpu_ip_block_version psp_v14_0_ip_block = { 4514 .type = AMD_IP_BLOCK_TYPE_PSP, 4515 .major = 14, 4516 .minor = 0, 4517 .rev = 0, 4518 .funcs = &psp_ip_funcs, 4519 }; 4520