1 /* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Author: Huang Rui 23 * 24 */ 25 26 #include <linux/firmware.h> 27 #include <drm/drm_drv.h> 28 29 #include "amdgpu.h" 30 #include "amdgpu_psp.h" 31 #include "amdgpu_ucode.h" 32 #include "amdgpu_xgmi.h" 33 #include "soc15_common.h" 34 #include "psp_v3_1.h" 35 #include "psp_v10_0.h" 36 #include "psp_v11_0.h" 37 #include "psp_v11_0_8.h" 38 #include "psp_v12_0.h" 39 #include "psp_v13_0.h" 40 #include "psp_v13_0_4.h" 41 #include "psp_v14_0.h" 42 43 #include "amdgpu_ras.h" 44 #include "amdgpu_securedisplay.h" 45 #include "amdgpu_atomfirmware.h" 46 47 #define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*16) 48 49 static int psp_load_smu_fw(struct psp_context *psp); 50 static int psp_rap_terminate(struct psp_context *psp); 51 static int psp_securedisplay_terminate(struct psp_context *psp); 52 53 static int psp_ring_init(struct psp_context *psp, 54 enum psp_ring_type ring_type) 55 { 56 int ret = 0; 57 struct psp_ring *ring; 58 struct amdgpu_device *adev = psp->adev; 59 60 ring = &psp->km_ring; 61 62 ring->ring_type = ring_type; 63 64 /* allocate 4k Page of Local Frame Buffer memory for ring */ 65 ring->ring_size = 0x1000; 66 ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE, 67 AMDGPU_GEM_DOMAIN_VRAM | 68 AMDGPU_GEM_DOMAIN_GTT, 69 &adev->firmware.rbuf, 70 &ring->ring_mem_mc_addr, 71 (void **)&ring->ring_mem); 72 if (ret) { 73 ring->ring_size = 0; 74 return ret; 75 } 76 77 return 0; 78 } 79 80 /* 81 * Due to DF Cstate management centralized to PMFW, the firmware 82 * loading sequence will be updated as below: 83 * - Load KDB 84 * - Load SYS_DRV 85 * - Load tOS 86 * - Load PMFW 87 * - Setup TMR 88 * - Load other non-psp fw 89 * - Load ASD 90 * - Load XGMI/RAS/HDCP/DTM TA if any 91 * 92 * This new sequence is required for 93 * - Arcturus and onwards 94 */ 95 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp) 96 { 97 struct amdgpu_device *adev = psp->adev; 98 99 if (amdgpu_sriov_vf(adev)) { 100 psp->pmfw_centralized_cstate_management = false; 101 return; 102 } 103 104 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 105 case IP_VERSION(11, 0, 0): 106 case IP_VERSION(11, 0, 4): 107 case IP_VERSION(11, 0, 5): 108 case IP_VERSION(11, 0, 7): 109 case IP_VERSION(11, 0, 9): 110 case IP_VERSION(11, 0, 11): 111 case IP_VERSION(11, 0, 12): 112 case IP_VERSION(11, 0, 13): 113 case IP_VERSION(13, 0, 0): 114 case IP_VERSION(13, 0, 
2): 115 case IP_VERSION(13, 0, 7): 116 psp->pmfw_centralized_cstate_management = true; 117 break; 118 default: 119 psp->pmfw_centralized_cstate_management = false; 120 break; 121 } 122 } 123 124 static int psp_init_sriov_microcode(struct psp_context *psp) 125 { 126 struct amdgpu_device *adev = psp->adev; 127 char ucode_prefix[30]; 128 int ret = 0; 129 130 amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix)); 131 132 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 133 case IP_VERSION(9, 0, 0): 134 case IP_VERSION(11, 0, 7): 135 case IP_VERSION(11, 0, 9): 136 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2; 137 ret = psp_init_cap_microcode(psp, ucode_prefix); 138 break; 139 case IP_VERSION(13, 0, 2): 140 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2; 141 ret = psp_init_cap_microcode(psp, ucode_prefix); 142 ret &= psp_init_ta_microcode(psp, ucode_prefix); 143 break; 144 case IP_VERSION(13, 0, 0): 145 adev->virt.autoload_ucode_id = 0; 146 break; 147 case IP_VERSION(13, 0, 6): 148 case IP_VERSION(13, 0, 14): 149 ret = psp_init_cap_microcode(psp, ucode_prefix); 150 ret &= psp_init_ta_microcode(psp, ucode_prefix); 151 break; 152 case IP_VERSION(13, 0, 10): 153 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA; 154 ret = psp_init_cap_microcode(psp, ucode_prefix); 155 break; 156 case IP_VERSION(13, 0, 12): 157 ret = psp_init_ta_microcode(psp, ucode_prefix); 158 break; 159 default: 160 return -EINVAL; 161 } 162 return ret; 163 } 164 165 static int psp_early_init(struct amdgpu_ip_block *ip_block) 166 { 167 struct amdgpu_device *adev = ip_block->adev; 168 struct psp_context *psp = &adev->psp; 169 170 psp->autoload_supported = true; 171 psp->boot_time_tmr = true; 172 173 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 174 case IP_VERSION(9, 0, 0): 175 psp_v3_1_set_psp_funcs(psp); 176 psp->autoload_supported = false; 177 psp->boot_time_tmr = false; 178 break; 179 case IP_VERSION(10, 0, 0): 180 case IP_VERSION(10, 0, 1): 181 psp_v10_0_set_psp_funcs(psp); 182 psp->autoload_supported = false; 183 psp->boot_time_tmr = false; 184 break; 185 case IP_VERSION(11, 0, 2): 186 case IP_VERSION(11, 0, 4): 187 psp_v11_0_set_psp_funcs(psp); 188 psp->autoload_supported = false; 189 psp->boot_time_tmr = false; 190 break; 191 case IP_VERSION(11, 0, 0): 192 case IP_VERSION(11, 0, 7): 193 adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev); 194 fallthrough; 195 case IP_VERSION(11, 0, 5): 196 case IP_VERSION(11, 0, 9): 197 case IP_VERSION(11, 0, 11): 198 case IP_VERSION(11, 5, 0): 199 case IP_VERSION(11, 5, 2): 200 case IP_VERSION(11, 0, 12): 201 case IP_VERSION(11, 0, 13): 202 psp_v11_0_set_psp_funcs(psp); 203 psp->boot_time_tmr = false; 204 break; 205 case IP_VERSION(11, 0, 3): 206 case IP_VERSION(12, 0, 1): 207 psp_v12_0_set_psp_funcs(psp); 208 psp->autoload_supported = false; 209 psp->boot_time_tmr = false; 210 break; 211 case IP_VERSION(13, 0, 2): 212 psp->boot_time_tmr = false; 213 fallthrough; 214 case IP_VERSION(13, 0, 6): 215 case IP_VERSION(13, 0, 14): 216 psp_v13_0_set_psp_funcs(psp); 217 psp->autoload_supported = false; 218 break; 219 case IP_VERSION(13, 0, 12): 220 psp_v13_0_set_psp_funcs(psp); 221 psp->autoload_supported = false; 222 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev); 223 break; 224 case IP_VERSION(13, 0, 1): 225 case IP_VERSION(13, 0, 3): 226 case IP_VERSION(13, 0, 5): 227 case IP_VERSION(13, 0, 8): 228 case IP_VERSION(13, 0, 11): 229 case IP_VERSION(14, 0, 0): 230 case IP_VERSION(14, 0, 1): 231 case IP_VERSION(14, 0, 4): 232 
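/*
 * Editor's note -- illustrative sketch only, not part of the driver: each
 * case in this switch wires up the generation-specific PSP callbacks and the
 * two capability flags consumed later in this file (autoload_supported
 * records whether the PSP front-door autoload flow is available and is
 * consulted by the firmware-loading code elsewhere in the driver;
 * boot_time_tmr, when set, makes psp_tmr_init() skip the driver-side TMR
 * allocation).  A hypothetical new entry would follow the same shape, e.g.:
 *
 *	case IP_VERSION(14, 0, 6):		// assumed example version
 *		psp_v14_0_set_psp_funcs(psp);
 *		psp->boot_time_tmr = false;	// only if FW does not reserve TMR at boot
 *		break;
 *
 * Any MP0 version without a case falls through to the default branch and
 * fails early init with -EINVAL.
 */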
psp_v13_0_set_psp_funcs(psp); 233 psp->boot_time_tmr = false; 234 break; 235 case IP_VERSION(11, 0, 8): 236 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) { 237 psp_v11_0_8_set_psp_funcs(psp); 238 } 239 psp->autoload_supported = false; 240 psp->boot_time_tmr = false; 241 break; 242 case IP_VERSION(13, 0, 0): 243 case IP_VERSION(13, 0, 7): 244 case IP_VERSION(13, 0, 10): 245 psp_v13_0_set_psp_funcs(psp); 246 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev); 247 psp->boot_time_tmr = false; 248 break; 249 case IP_VERSION(13, 0, 4): 250 psp_v13_0_4_set_psp_funcs(psp); 251 psp->boot_time_tmr = false; 252 break; 253 case IP_VERSION(14, 0, 2): 254 case IP_VERSION(14, 0, 3): 255 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev); 256 psp_v14_0_set_psp_funcs(psp); 257 break; 258 case IP_VERSION(14, 0, 5): 259 psp_v14_0_set_psp_funcs(psp); 260 psp->boot_time_tmr = false; 261 break; 262 default: 263 return -EINVAL; 264 } 265 266 psp->adev = adev; 267 268 adev->psp_timeout = 20000; 269 270 psp_check_pmfw_centralized_cstate_management(psp); 271 272 if (amdgpu_sriov_vf(adev)) 273 return psp_init_sriov_microcode(psp); 274 else 275 return psp_init_microcode(psp); 276 } 277 278 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx) 279 { 280 amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr, 281 &mem_ctx->shared_buf); 282 mem_ctx->shared_bo = NULL; 283 } 284 285 static void psp_free_shared_bufs(struct psp_context *psp) 286 { 287 void *tmr_buf; 288 void **pptr; 289 290 /* free TMR memory buffer */ 291 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; 292 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr); 293 psp->tmr_bo = NULL; 294 295 /* free xgmi shared memory */ 296 psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context); 297 298 /* free ras shared memory */ 299 psp_ta_free_shared_buf(&psp->ras_context.context.mem_context); 300 301 /* free hdcp shared memory */ 302 psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context); 303 304 /* free dtm shared memory */ 305 psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context); 306 307 /* free rap shared memory */ 308 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); 309 310 /* free securedisplay shared memory */ 311 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); 312 313 314 } 315 316 static void psp_memory_training_fini(struct psp_context *psp) 317 { 318 struct psp_memory_training_context *ctx = &psp->mem_train_ctx; 319 320 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT; 321 kfree(ctx->sys_cache); 322 ctx->sys_cache = NULL; 323 } 324 325 static int psp_memory_training_init(struct psp_context *psp) 326 { 327 int ret; 328 struct psp_memory_training_context *ctx = &psp->mem_train_ctx; 329 330 if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) { 331 dev_dbg(psp->adev->dev, "memory training is not supported!\n"); 332 return 0; 333 } 334 335 ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL); 336 if (ctx->sys_cache == NULL) { 337 dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n"); 338 ret = -ENOMEM; 339 goto Err_out; 340 } 341 342 dev_dbg(psp->adev->dev, 343 "train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", 344 ctx->train_data_size, 345 ctx->p2c_train_data_offset, 346 ctx->c2p_train_data_offset); 347 ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS; 348 return 0; 349 350 Err_out: 351 psp_memory_training_fini(psp); 352 return ret; 353 } 354 355 /* 356 * Helper funciton to query psp runtime database entry 357 * 358 * @adev: amdgpu_device pointer 
359 * @entry_type: the type of psp runtime database entry 360 * @db_entry: runtime database entry pointer 361 * 362 * Return false if runtime database doesn't exit or entry is invalid 363 * or true if the specific database entry is found, and copy to @db_entry 364 */ 365 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev, 366 enum psp_runtime_entry_type entry_type, 367 void *db_entry) 368 { 369 uint64_t db_header_pos, db_dir_pos; 370 struct psp_runtime_data_header db_header = {0}; 371 struct psp_runtime_data_directory db_dir = {0}; 372 bool ret = false; 373 int i; 374 375 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || 376 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) || 377 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) 378 return false; 379 380 db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET; 381 db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header); 382 383 /* read runtime db header from vram */ 384 amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header, 385 sizeof(struct psp_runtime_data_header), false); 386 387 if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) { 388 /* runtime db doesn't exist, exit */ 389 dev_dbg(adev->dev, "PSP runtime database doesn't exist\n"); 390 return false; 391 } 392 393 /* read runtime database entry from vram */ 394 amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir, 395 sizeof(struct psp_runtime_data_directory), false); 396 397 if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) { 398 /* invalid db entry count, exit */ 399 dev_warn(adev->dev, "Invalid PSP runtime database entry count\n"); 400 return false; 401 } 402 403 /* look up for requested entry type */ 404 for (i = 0; i < db_dir.entry_count && !ret; i++) { 405 if (db_dir.entry_list[i].entry_type == entry_type) { 406 switch (entry_type) { 407 case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG: 408 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) { 409 /* invalid db entry size */ 410 dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n"); 411 return false; 412 } 413 /* read runtime database entry */ 414 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset, 415 (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false); 416 ret = true; 417 break; 418 case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS: 419 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) { 420 /* invalid db entry size */ 421 dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n"); 422 return false; 423 } 424 /* read runtime database entry */ 425 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset, 426 (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false); 427 ret = true; 428 break; 429 default: 430 ret = false; 431 break; 432 } 433 } 434 } 435 436 return ret; 437 } 438 439 static int psp_sw_init(struct amdgpu_ip_block *ip_block) 440 { 441 struct amdgpu_device *adev = ip_block->adev; 442 struct psp_context *psp = &adev->psp; 443 int ret; 444 struct psp_runtime_boot_cfg_entry boot_cfg_entry; 445 struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx; 446 struct psp_runtime_scpm_entry scpm_entry; 447 448 psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 449 if (!psp->cmd) { 450 dev_err(adev->dev, "Failed to allocate memory to command buffer!\n"); 451 ret = -ENOMEM; 452 } 453 454 adev->psp.xgmi_context.supports_extended_data = 455 
!adev->gmc.xgmi.connected_to_cpu && 456 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2); 457 458 memset(&scpm_entry, 0, sizeof(scpm_entry)); 459 if ((psp_get_runtime_db_entry(adev, 460 PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS, 461 &scpm_entry)) && 462 (scpm_entry.scpm_status != SCPM_DISABLE)) { 463 adev->scpm_enabled = true; 464 adev->scpm_status = scpm_entry.scpm_status; 465 } else { 466 adev->scpm_enabled = false; 467 adev->scpm_status = SCPM_DISABLE; 468 } 469 470 /* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */ 471 472 memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry)); 473 if (psp_get_runtime_db_entry(adev, 474 PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG, 475 &boot_cfg_entry)) { 476 psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask; 477 if ((psp->boot_cfg_bitmask) & 478 BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) { 479 /* If psp runtime database exists, then 480 * only enable two stage memory training 481 * when TWO_STAGE_DRAM_TRAINING bit is set 482 * in runtime database 483 */ 484 mem_training_ctx->enable_mem_training = true; 485 } 486 487 } else { 488 /* If psp runtime database doesn't exist or is 489 * invalid, force enable two stage memory training 490 */ 491 mem_training_ctx->enable_mem_training = true; 492 } 493 494 if (mem_training_ctx->enable_mem_training) { 495 ret = psp_memory_training_init(psp); 496 if (ret) { 497 dev_err(adev->dev, "Failed to initialize memory training!\n"); 498 return ret; 499 } 500 501 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); 502 if (ret) { 503 dev_err(adev->dev, "Failed to process memory training!\n"); 504 return ret; 505 } 506 } 507 508 ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, 509 (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ? 
510 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, 511 &psp->fw_pri_bo, 512 &psp->fw_pri_mc_addr, 513 &psp->fw_pri_buf); 514 if (ret) 515 return ret; 516 517 ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE, 518 AMDGPU_GEM_DOMAIN_VRAM | 519 AMDGPU_GEM_DOMAIN_GTT, 520 &psp->fence_buf_bo, 521 &psp->fence_buf_mc_addr, 522 &psp->fence_buf); 523 if (ret) 524 goto failed1; 525 526 ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE, 527 AMDGPU_GEM_DOMAIN_VRAM | 528 AMDGPU_GEM_DOMAIN_GTT, 529 &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, 530 (void **)&psp->cmd_buf_mem); 531 if (ret) 532 goto failed2; 533 534 return 0; 535 536 failed2: 537 amdgpu_bo_free_kernel(&psp->fence_buf_bo, 538 &psp->fence_buf_mc_addr, &psp->fence_buf); 539 failed1: 540 amdgpu_bo_free_kernel(&psp->fw_pri_bo, 541 &psp->fw_pri_mc_addr, &psp->fw_pri_buf); 542 return ret; 543 } 544 545 static int psp_sw_fini(struct amdgpu_ip_block *ip_block) 546 { 547 struct amdgpu_device *adev = ip_block->adev; 548 struct psp_context *psp = &adev->psp; 549 550 psp_memory_training_fini(psp); 551 552 amdgpu_ucode_release(&psp->sos_fw); 553 amdgpu_ucode_release(&psp->asd_fw); 554 amdgpu_ucode_release(&psp->ta_fw); 555 amdgpu_ucode_release(&psp->cap_fw); 556 amdgpu_ucode_release(&psp->toc_fw); 557 558 kfree(psp->cmd); 559 psp->cmd = NULL; 560 561 psp_free_shared_bufs(psp); 562 563 if (psp->km_ring.ring_mem) 564 amdgpu_bo_free_kernel(&adev->firmware.rbuf, 565 &psp->km_ring.ring_mem_mc_addr, 566 (void **)&psp->km_ring.ring_mem); 567 568 amdgpu_bo_free_kernel(&psp->fw_pri_bo, 569 &psp->fw_pri_mc_addr, &psp->fw_pri_buf); 570 amdgpu_bo_free_kernel(&psp->fence_buf_bo, 571 &psp->fence_buf_mc_addr, &psp->fence_buf); 572 amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, 573 (void **)&psp->cmd_buf_mem); 574 575 return 0; 576 } 577 578 int psp_wait_for(struct psp_context *psp, uint32_t reg_index, 579 uint32_t reg_val, uint32_t mask, bool check_changed) 580 { 581 uint32_t val; 582 int i; 583 struct amdgpu_device *adev = psp->adev; 584 585 if (psp->adev->no_hw_access) 586 return 0; 587 588 for (i = 0; i < adev->usec_timeout; i++) { 589 val = RREG32(reg_index); 590 if (check_changed) { 591 if (val != reg_val) 592 return 0; 593 } else { 594 if ((val & mask) == reg_val) 595 return 0; 596 } 597 udelay(1); 598 } 599 600 return -ETIME; 601 } 602 603 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index, 604 uint32_t reg_val, uint32_t mask, uint32_t msec_timeout) 605 { 606 uint32_t val; 607 int i; 608 struct amdgpu_device *adev = psp->adev; 609 610 if (psp->adev->no_hw_access) 611 return 0; 612 613 for (i = 0; i < msec_timeout; i++) { 614 val = RREG32(reg_index); 615 if ((val & mask) == reg_val) 616 return 0; 617 msleep(1); 618 } 619 620 return -ETIME; 621 } 622 623 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id) 624 { 625 switch (cmd_id) { 626 case GFX_CMD_ID_LOAD_TA: 627 return "LOAD_TA"; 628 case GFX_CMD_ID_UNLOAD_TA: 629 return "UNLOAD_TA"; 630 case GFX_CMD_ID_INVOKE_CMD: 631 return "INVOKE_CMD"; 632 case GFX_CMD_ID_LOAD_ASD: 633 return "LOAD_ASD"; 634 case GFX_CMD_ID_SETUP_TMR: 635 return "SETUP_TMR"; 636 case GFX_CMD_ID_LOAD_IP_FW: 637 return "LOAD_IP_FW"; 638 case GFX_CMD_ID_DESTROY_TMR: 639 return "DESTROY_TMR"; 640 case GFX_CMD_ID_SAVE_RESTORE: 641 return "SAVE_RESTORE_IP_FW"; 642 case GFX_CMD_ID_SETUP_VMR: 643 return "SETUP_VMR"; 644 case GFX_CMD_ID_DESTROY_VMR: 645 return "DESTROY_VMR"; 646 case GFX_CMD_ID_PROG_REG: 647 return "PROG_REG"; 648 case 
GFX_CMD_ID_GET_FW_ATTESTATION: 649 return "GET_FW_ATTESTATION"; 650 case GFX_CMD_ID_LOAD_TOC: 651 return "ID_LOAD_TOC"; 652 case GFX_CMD_ID_AUTOLOAD_RLC: 653 return "AUTOLOAD_RLC"; 654 case GFX_CMD_ID_BOOT_CFG: 655 return "BOOT_CFG"; 656 case GFX_CMD_ID_CONFIG_SQ_PERFMON: 657 return "CONFIG_SQ_PERFMON"; 658 case GFX_CMD_ID_FB_FW_RESERV_ADDR: 659 return "FB_FW_RESERV_ADDR"; 660 case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR: 661 return "FB_FW_RESERV_EXT_ADDR"; 662 default: 663 return "UNKNOWN CMD"; 664 } 665 } 666 667 static bool psp_err_warn(struct psp_context *psp) 668 { 669 struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem; 670 671 /* This response indicates reg list is already loaded */ 672 if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) && 673 cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW && 674 cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST && 675 cmd->resp.status == TEE_ERROR_CANCEL) 676 return false; 677 678 return true; 679 } 680 681 static int 682 psp_cmd_submit_buf(struct psp_context *psp, 683 struct amdgpu_firmware_info *ucode, 684 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr) 685 { 686 int ret; 687 int index; 688 int timeout = psp->adev->psp_timeout; 689 bool ras_intr = false; 690 bool skip_unsupport = false; 691 692 if (psp->adev->no_hw_access) 693 return 0; 694 695 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); 696 697 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); 698 699 index = atomic_inc_return(&psp->fence_value); 700 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index); 701 if (ret) { 702 atomic_dec(&psp->fence_value); 703 goto exit; 704 } 705 706 amdgpu_device_invalidate_hdp(psp->adev, NULL); 707 while (*((unsigned int *)psp->fence_buf) != index) { 708 if (--timeout == 0) 709 break; 710 /* 711 * Shouldn't wait for timeout when err_event_athub occurs, 712 * because gpu reset thread triggered and lock resource should 713 * be released for psp resume sequence. 714 */ 715 ras_intr = amdgpu_ras_intr_triggered(); 716 if (ras_intr) 717 break; 718 usleep_range(10, 100); 719 amdgpu_device_invalidate_hdp(psp->adev, NULL); 720 } 721 722 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */ 723 skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED || 724 psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev); 725 726 memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp)); 727 728 /* In some cases, psp response status is not 0 even there is no 729 * problem while the command is submitted. Some version of PSP FW 730 * doesn't write 0 to that field. 731 * So here we would like to only print a warning instead of an error 732 * during psp initialization to avoid breaking hw_init and it doesn't 733 * return -EINVAL. 734 */ 735 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) { 736 if (ucode) 737 dev_warn(psp->adev->dev, 738 "failed to load ucode %s(0x%X) ", 739 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id); 740 if (psp_err_warn(psp)) 741 dev_warn( 742 psp->adev->dev, 743 "psp gfx command %s(0x%X) failed and response status is (0x%X)\n", 744 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), 745 psp->cmd_buf_mem->cmd_id, 746 psp->cmd_buf_mem->resp.status); 747 /* If any firmware (including CAP) load fails under SRIOV, it should 748 * return failure to stop the VF from initializing. 
749 * Also return failure in case of timeout 750 */ 751 if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) { 752 ret = -EINVAL; 753 goto exit; 754 } 755 } 756 757 if (ucode) { 758 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo; 759 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi; 760 } 761 762 exit: 763 return ret; 764 } 765 766 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp) 767 { 768 struct psp_gfx_cmd_resp *cmd = psp->cmd; 769 770 mutex_lock(&psp->mutex); 771 772 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); 773 774 return cmd; 775 } 776 777 static void release_psp_cmd_buf(struct psp_context *psp) 778 { 779 mutex_unlock(&psp->mutex); 780 } 781 782 static void psp_prep_tmr_cmd_buf(struct psp_context *psp, 783 struct psp_gfx_cmd_resp *cmd, 784 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo) 785 { 786 struct amdgpu_device *adev = psp->adev; 787 uint32_t size = 0; 788 uint64_t tmr_pa = 0; 789 790 if (tmr_bo) { 791 size = amdgpu_bo_size(tmr_bo); 792 tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo); 793 } 794 795 if (amdgpu_sriov_vf(psp->adev)) 796 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR; 797 else 798 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR; 799 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc); 800 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc); 801 cmd->cmd.cmd_setup_tmr.buf_size = size; 802 cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1; 803 cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa); 804 cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa); 805 } 806 807 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd, 808 uint64_t pri_buf_mc, uint32_t size) 809 { 810 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC; 811 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc); 812 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc); 813 cmd->cmd.cmd_load_toc.toc_size = size; 814 } 815 816 /* Issue LOAD TOC cmd to PSP to part toc and calculate tmr size needed */ 817 static int psp_load_toc(struct psp_context *psp, 818 uint32_t *tmr_size) 819 { 820 int ret; 821 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 822 823 /* Copy toc to psp firmware private buffer */ 824 psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes); 825 826 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes); 827 828 ret = psp_cmd_submit_buf(psp, NULL, cmd, 829 psp->fence_buf_mc_addr); 830 if (!ret) 831 *tmr_size = psp->cmd_buf_mem->resp.tmr_size; 832 833 release_psp_cmd_buf(psp); 834 835 return ret; 836 } 837 838 /* Set up Trusted Memory Region */ 839 static int psp_tmr_init(struct psp_context *psp) 840 { 841 int ret = 0; 842 int tmr_size; 843 void *tmr_buf; 844 void **pptr; 845 846 /* 847 * According to HW engineer, they prefer the TMR address be "naturally 848 * aligned" , e.g. the start address be an integer divide of TMR size. 849 * 850 * Note: this memory need be reserved till the driver 851 * uninitializes. 852 */ 853 tmr_size = PSP_TMR_SIZE(psp->adev); 854 855 /* For ASICs support RLC autoload, psp will parse the toc 856 * and calculate the total size of TMR needed 857 */ 858 if (!amdgpu_sriov_vf(psp->adev) && 859 psp->toc.start_addr && 860 psp->toc.size_bytes && 861 psp->fw_pri_buf) { 862 ret = psp_load_toc(psp, &tmr_size); 863 if (ret) { 864 dev_err(psp->adev->dev, "Failed to load toc\n"); 865 return ret; 866 } 867 } 868 869 if (!psp->tmr_bo && !psp->boot_time_tmr) { 870 pptr = amdgpu_sriov_vf(psp->adev) ? 
&tmr_buf : NULL; 871 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, 872 PSP_TMR_ALIGNMENT, 873 AMDGPU_HAS_VRAM(psp->adev) ? 874 AMDGPU_GEM_DOMAIN_VRAM : 875 AMDGPU_GEM_DOMAIN_GTT, 876 &psp->tmr_bo, &psp->tmr_mc_addr, 877 pptr); 878 } 879 if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo) 880 psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo); 881 882 return ret; 883 } 884 885 static bool psp_skip_tmr(struct psp_context *psp) 886 { 887 switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) { 888 case IP_VERSION(11, 0, 9): 889 case IP_VERSION(11, 0, 7): 890 case IP_VERSION(13, 0, 2): 891 case IP_VERSION(13, 0, 6): 892 case IP_VERSION(13, 0, 10): 893 case IP_VERSION(13, 0, 12): 894 case IP_VERSION(13, 0, 14): 895 return true; 896 default: 897 return false; 898 } 899 } 900 901 static int psp_tmr_load(struct psp_context *psp) 902 { 903 int ret; 904 struct psp_gfx_cmd_resp *cmd; 905 906 /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR. 907 * Already set up by host driver. 908 */ 909 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp)) 910 return 0; 911 912 cmd = acquire_psp_cmd_buf(psp); 913 914 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo); 915 if (psp->tmr_bo) 916 dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n", 917 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr); 918 919 ret = psp_cmd_submit_buf(psp, NULL, cmd, 920 psp->fence_buf_mc_addr); 921 922 release_psp_cmd_buf(psp); 923 924 return ret; 925 } 926 927 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp, 928 struct psp_gfx_cmd_resp *cmd) 929 { 930 if (amdgpu_sriov_vf(psp->adev)) 931 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR; 932 else 933 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR; 934 } 935 936 static int psp_tmr_unload(struct psp_context *psp) 937 { 938 int ret; 939 struct psp_gfx_cmd_resp *cmd; 940 941 /* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV, 942 * as TMR is not loaded at all 943 */ 944 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp)) 945 return 0; 946 947 cmd = acquire_psp_cmd_buf(psp); 948 949 psp_prep_tmr_unload_cmd_buf(psp, cmd); 950 dev_dbg(psp->adev->dev, "free PSP TMR buffer\n"); 951 952 ret = psp_cmd_submit_buf(psp, NULL, cmd, 953 psp->fence_buf_mc_addr); 954 955 release_psp_cmd_buf(psp); 956 957 return ret; 958 } 959 960 static int psp_tmr_terminate(struct psp_context *psp) 961 { 962 return psp_tmr_unload(psp); 963 } 964 965 int psp_get_fw_attestation_records_addr(struct psp_context *psp, 966 uint64_t *output_ptr) 967 { 968 int ret; 969 struct psp_gfx_cmd_resp *cmd; 970 971 if (!output_ptr) 972 return -EINVAL; 973 974 if (amdgpu_sriov_vf(psp->adev)) 975 return 0; 976 977 cmd = acquire_psp_cmd_buf(psp); 978 979 cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION; 980 981 ret = psp_cmd_submit_buf(psp, NULL, cmd, 982 psp->fence_buf_mc_addr); 983 984 if (!ret) { 985 *output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) + 986 ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32); 987 } 988 989 release_psp_cmd_buf(psp); 990 991 return ret; 992 } 993 994 static int psp_get_fw_reservation_info(struct psp_context *psp, 995 uint32_t cmd_id, 996 uint64_t *addr, 997 uint32_t *size) 998 { 999 int ret; 1000 uint32_t status; 1001 struct psp_gfx_cmd_resp *cmd; 1002 1003 cmd = acquire_psp_cmd_buf(psp); 1004 1005 cmd->cmd_id = cmd_id; 1006 1007 ret = psp_cmd_submit_buf(psp, NULL, cmd, 1008 psp->fence_buf_mc_addr); 1009 if (ret) { 1010 release_psp_cmd_buf(psp); 1011 return ret; 1012 } 1013 1014 status = cmd->resp.status; 1015 if (status == 
PSP_ERR_UNKNOWN_COMMAND) { 1016 release_psp_cmd_buf(psp); 1017 *addr = 0; 1018 *size = 0; 1019 return 0; 1020 } 1021 1022 *addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 | 1023 cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo; 1024 *size = cmd->resp.uresp.fw_reserve_info.reserve_size; 1025 1026 release_psp_cmd_buf(psp); 1027 1028 return 0; 1029 } 1030 1031 int psp_update_fw_reservation(struct psp_context *psp) 1032 { 1033 int ret; 1034 uint64_t reserv_addr, reserv_addr_ext; 1035 uint32_t reserv_size, reserv_size_ext; 1036 struct amdgpu_device *adev = psp->adev; 1037 1038 if (amdgpu_sriov_vf(psp->adev)) 1039 return 0; 1040 1041 if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 2)) && 1042 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 3))) 1043 return 0; 1044 1045 ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size); 1046 if (ret) 1047 return ret; 1048 ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext); 1049 if (ret) 1050 return ret; 1051 1052 if (reserv_addr != adev->gmc.real_vram_size - reserv_size) { 1053 dev_warn(adev->dev, "reserve fw region is not valid!\n"); 1054 return 0; 1055 } 1056 1057 amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL); 1058 1059 reserv_size = roundup(reserv_size, SZ_1M); 1060 1061 ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL); 1062 if (ret) { 1063 dev_err(adev->dev, "reserve fw region failed(%d)!\n", ret); 1064 amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL); 1065 return ret; 1066 } 1067 1068 reserv_size_ext = roundup(reserv_size_ext, SZ_1M); 1069 1070 ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext, 1071 &adev->mman.fw_reserved_memory_extend, NULL); 1072 if (ret) { 1073 dev_err(adev->dev, "reserve extend fw region failed(%d)!\n", ret); 1074 amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL); 1075 return ret; 1076 } 1077 1078 return 0; 1079 } 1080 1081 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg) 1082 { 1083 struct psp_context *psp = &adev->psp; 1084 struct psp_gfx_cmd_resp *cmd; 1085 int ret; 1086 1087 if (amdgpu_sriov_vf(adev)) 1088 return 0; 1089 1090 cmd = acquire_psp_cmd_buf(psp); 1091 1092 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG; 1093 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET; 1094 1095 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1096 if (!ret) { 1097 *boot_cfg = 1098 (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 
1 : 0; 1099 } 1100 1101 release_psp_cmd_buf(psp); 1102 1103 return ret; 1104 } 1105 1106 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg) 1107 { 1108 int ret; 1109 struct psp_context *psp = &adev->psp; 1110 struct psp_gfx_cmd_resp *cmd; 1111 1112 if (amdgpu_sriov_vf(adev)) 1113 return 0; 1114 1115 cmd = acquire_psp_cmd_buf(psp); 1116 1117 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG; 1118 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET; 1119 cmd->cmd.boot_cfg.boot_config = boot_cfg; 1120 cmd->cmd.boot_cfg.boot_config_valid = boot_cfg; 1121 1122 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1123 1124 release_psp_cmd_buf(psp); 1125 1126 return ret; 1127 } 1128 1129 static int psp_rl_load(struct amdgpu_device *adev) 1130 { 1131 int ret; 1132 struct psp_context *psp = &adev->psp; 1133 struct psp_gfx_cmd_resp *cmd; 1134 1135 if (!is_psp_fw_valid(psp->rl)) 1136 return 0; 1137 1138 cmd = acquire_psp_cmd_buf(psp); 1139 1140 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 1141 memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes); 1142 1143 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 1144 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr); 1145 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr); 1146 cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes; 1147 cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST; 1148 1149 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1150 1151 release_psp_cmd_buf(psp); 1152 1153 return ret; 1154 } 1155 1156 int psp_memory_partition(struct psp_context *psp, int mode) 1157 { 1158 struct psp_gfx_cmd_resp *cmd; 1159 int ret; 1160 1161 if (amdgpu_sriov_vf(psp->adev)) 1162 return 0; 1163 1164 cmd = acquire_psp_cmd_buf(psp); 1165 1166 cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE; 1167 cmd->cmd.cmd_memory_part.mode = mode; 1168 1169 dev_info(psp->adev->dev, 1170 "Requesting %d memory partition change through PSP", mode); 1171 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1172 if (ret) 1173 dev_err(psp->adev->dev, 1174 "PSP request failed to change to NPS%d mode\n", mode); 1175 1176 release_psp_cmd_buf(psp); 1177 1178 return ret; 1179 } 1180 1181 int psp_spatial_partition(struct psp_context *psp, int mode) 1182 { 1183 struct psp_gfx_cmd_resp *cmd; 1184 int ret; 1185 1186 if (amdgpu_sriov_vf(psp->adev)) 1187 return 0; 1188 1189 cmd = acquire_psp_cmd_buf(psp); 1190 1191 cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART; 1192 cmd->cmd.cmd_spatial_part.mode = mode; 1193 1194 dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode); 1195 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1196 1197 release_psp_cmd_buf(psp); 1198 1199 return ret; 1200 } 1201 1202 static int psp_asd_initialize(struct psp_context *psp) 1203 { 1204 int ret; 1205 1206 /* If PSP version doesn't match ASD version, asd loading will be failed. 1207 * add workaround to bypass it for sriov now. 
1208 * TODO: add version check to make it common 1209 */ 1210 if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes) 1211 return 0; 1212 1213 /* bypass asd if display hardware is not available */ 1214 if (!amdgpu_device_has_display_hardware(psp->adev) && 1215 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10)) 1216 return 0; 1217 1218 psp->asd_context.mem_context.shared_mc_addr = 0; 1219 psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE; 1220 psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD; 1221 1222 ret = psp_ta_load(psp, &psp->asd_context); 1223 if (!ret) 1224 psp->asd_context.initialized = true; 1225 1226 return ret; 1227 } 1228 1229 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1230 uint32_t session_id) 1231 { 1232 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; 1233 cmd->cmd.cmd_unload_ta.session_id = session_id; 1234 } 1235 1236 int psp_ta_unload(struct psp_context *psp, struct ta_context *context) 1237 { 1238 int ret; 1239 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 1240 1241 psp_prep_ta_unload_cmd_buf(cmd, context->session_id); 1242 1243 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1244 1245 context->resp_status = cmd->resp.status; 1246 1247 release_psp_cmd_buf(psp); 1248 1249 return ret; 1250 } 1251 1252 static int psp_asd_terminate(struct psp_context *psp) 1253 { 1254 int ret; 1255 1256 if (amdgpu_sriov_vf(psp->adev)) 1257 return 0; 1258 1259 if (!psp->asd_context.initialized) 1260 return 0; 1261 1262 ret = psp_ta_unload(psp, &psp->asd_context); 1263 if (!ret) 1264 psp->asd_context.initialized = false; 1265 1266 return ret; 1267 } 1268 1269 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1270 uint32_t id, uint32_t value) 1271 { 1272 cmd->cmd_id = GFX_CMD_ID_PROG_REG; 1273 cmd->cmd.cmd_setup_reg_prog.reg_value = value; 1274 cmd->cmd.cmd_setup_reg_prog.reg_id = id; 1275 } 1276 1277 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg, 1278 uint32_t value) 1279 { 1280 struct psp_gfx_cmd_resp *cmd; 1281 int ret = 0; 1282 1283 if (reg >= PSP_REG_LAST) 1284 return -EINVAL; 1285 1286 cmd = acquire_psp_cmd_buf(psp); 1287 1288 psp_prep_reg_prog_cmd_buf(cmd, reg, value); 1289 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1290 if (ret) 1291 dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg); 1292 1293 release_psp_cmd_buf(psp); 1294 1295 return ret; 1296 } 1297 1298 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1299 uint64_t ta_bin_mc, 1300 struct ta_context *context) 1301 { 1302 cmd->cmd_id = context->ta_load_type; 1303 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc); 1304 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc); 1305 cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes; 1306 1307 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 1308 lower_32_bits(context->mem_context.shared_mc_addr); 1309 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 1310 upper_32_bits(context->mem_context.shared_mc_addr); 1311 cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size; 1312 } 1313 1314 int psp_ta_init_shared_buf(struct psp_context *psp, 1315 struct ta_mem_context *mem_ctx) 1316 { 1317 /* 1318 * Allocate 16k memory aligned to 4k from Frame Buffer (local 1319 * physical) for ta to host memory 1320 */ 1321 return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size, 1322 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | 1323 AMDGPU_GEM_DOMAIN_GTT, 1324 
&mem_ctx->shared_bo, 1325 &mem_ctx->shared_mc_addr, 1326 &mem_ctx->shared_buf); 1327 } 1328 1329 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1330 uint32_t ta_cmd_id, 1331 uint32_t session_id) 1332 { 1333 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; 1334 cmd->cmd.cmd_invoke_cmd.session_id = session_id; 1335 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; 1336 } 1337 1338 int psp_ta_invoke(struct psp_context *psp, 1339 uint32_t ta_cmd_id, 1340 struct ta_context *context) 1341 { 1342 int ret; 1343 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 1344 1345 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id); 1346 1347 ret = psp_cmd_submit_buf(psp, NULL, cmd, 1348 psp->fence_buf_mc_addr); 1349 1350 context->resp_status = cmd->resp.status; 1351 1352 release_psp_cmd_buf(psp); 1353 1354 return ret; 1355 } 1356 1357 int psp_ta_load(struct psp_context *psp, struct ta_context *context) 1358 { 1359 int ret; 1360 struct psp_gfx_cmd_resp *cmd; 1361 1362 cmd = acquire_psp_cmd_buf(psp); 1363 1364 psp_copy_fw(psp, context->bin_desc.start_addr, 1365 context->bin_desc.size_bytes); 1366 1367 if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && 1368 context->mem_context.shared_bo) 1369 context->mem_context.shared_mc_addr = 1370 amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo); 1371 1372 psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context); 1373 1374 ret = psp_cmd_submit_buf(psp, NULL, cmd, 1375 psp->fence_buf_mc_addr); 1376 1377 context->resp_status = cmd->resp.status; 1378 1379 if (!ret) 1380 context->session_id = cmd->resp.session_id; 1381 1382 release_psp_cmd_buf(psp); 1383 1384 return ret; 1385 } 1386 1387 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1388 { 1389 return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context); 1390 } 1391 1392 int psp_xgmi_terminate(struct psp_context *psp) 1393 { 1394 int ret; 1395 struct amdgpu_device *adev = psp->adev; 1396 1397 /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */ 1398 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) || 1399 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) && 1400 adev->gmc.xgmi.connected_to_cpu)) 1401 return 0; 1402 1403 if (!psp->xgmi_context.context.initialized) 1404 return 0; 1405 1406 ret = psp_ta_unload(psp, &psp->xgmi_context.context); 1407 1408 psp->xgmi_context.context.initialized = false; 1409 1410 return ret; 1411 } 1412 1413 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta) 1414 { 1415 struct ta_xgmi_shared_memory *xgmi_cmd; 1416 int ret; 1417 1418 if (!psp->ta_fw || 1419 !psp->xgmi_context.context.bin_desc.size_bytes || 1420 !psp->xgmi_context.context.bin_desc.start_addr) 1421 return -ENOENT; 1422 1423 if (!load_ta) 1424 goto invoke; 1425 1426 psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE; 1427 psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1428 1429 if (!psp->xgmi_context.context.mem_context.shared_buf) { 1430 ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context); 1431 if (ret) 1432 return ret; 1433 } 1434 1435 /* Load XGMI TA */ 1436 ret = psp_ta_load(psp, &psp->xgmi_context.context); 1437 if (!ret) 1438 psp->xgmi_context.context.initialized = true; 1439 else 1440 return ret; 1441 1442 invoke: 1443 /* Initialize XGMI session */ 1444 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf); 1445 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1446 
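/*
 * Editor's note -- illustrative sketch only: the helpers above implement one
 * TA session lifecycle: allocate the shared buffer once, load the TA to
 * obtain a session id, invoke commands against that session, unload when
 * done.  A minimal caller, assuming a hypothetical "foo" TA context that
 * reuses the generic struct ta_context fields:
 *
 *	static int foo_ta_run_once(struct psp_context *psp, uint32_t ta_cmd_id)
 *	{
 *		struct ta_context *ctx = &psp->foo_context.context;	// hypothetical
 *		int ret;
 *
 *		ctx->mem_context.shared_mem_size = 0x4000;	// 16 KiB, like the other TAs
 *		ctx->ta_load_type = GFX_CMD_ID_LOAD_TA;
 *
 *		if (!ctx->mem_context.shared_buf) {
 *			ret = psp_ta_init_shared_buf(psp, &ctx->mem_context);
 *			if (ret)
 *				return ret;
 *		}
 *
 *		ret = psp_ta_load(psp, ctx);		// fills ctx->session_id on success
 *		if (ret)
 *			return ret;
 *
 *		ret = psp_ta_invoke(psp, ta_cmd_id, ctx);	// routed by session_id
 *
 *		psp_ta_unload(psp, ctx);		// drop the session again
 *		return ret;
 *	}
 *
 * This function (psp_xgmi_initialize) performs the load/invoke half of that
 * sequence and keeps the session open; psp_xgmi_terminate() does the unload.
 */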
xgmi_cmd->flag_extend_link_record = set_extended_data; 1447 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE; 1448 1449 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1450 /* note down the capbility flag for XGMI TA */ 1451 psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag; 1452 1453 return ret; 1454 } 1455 1456 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id) 1457 { 1458 struct ta_xgmi_shared_memory *xgmi_cmd; 1459 int ret; 1460 1461 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1462 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1463 1464 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID; 1465 1466 /* Invoke xgmi ta to get hive id */ 1467 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1468 if (ret) 1469 return ret; 1470 1471 *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id; 1472 1473 return 0; 1474 } 1475 1476 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id) 1477 { 1478 struct ta_xgmi_shared_memory *xgmi_cmd; 1479 int ret; 1480 1481 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1482 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1483 1484 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID; 1485 1486 /* Invoke xgmi ta to get the node id */ 1487 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1488 if (ret) 1489 return ret; 1490 1491 *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id; 1492 1493 return 0; 1494 } 1495 1496 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp) 1497 { 1498 return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == 1499 IP_VERSION(13, 0, 2) && 1500 psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) || 1501 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= 1502 IP_VERSION(13, 0, 6); 1503 } 1504 1505 /* 1506 * Chips that support extended topology information require the driver to 1507 * reflect topology information in the opposite direction. This is 1508 * because the TA has already exceeded its link record limit and if the 1509 * TA holds bi-directional information, the driver would have to do 1510 * multiple fetches instead of just two. 1511 */ 1512 static void psp_xgmi_reflect_topology_info(struct psp_context *psp, 1513 struct psp_xgmi_node_info node_info) 1514 { 1515 struct amdgpu_device *mirror_adev; 1516 struct amdgpu_hive_info *hive; 1517 uint64_t src_node_id = psp->adev->gmc.xgmi.node_id; 1518 uint64_t dst_node_id = node_info.node_id; 1519 uint8_t dst_num_hops = node_info.num_hops; 1520 uint8_t dst_num_links = node_info.num_links; 1521 1522 hive = amdgpu_get_xgmi_hive(psp->adev); 1523 if (WARN_ON(!hive)) 1524 return; 1525 1526 list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) { 1527 struct psp_xgmi_topology_info *mirror_top_info; 1528 int j; 1529 1530 if (mirror_adev->gmc.xgmi.node_id != dst_node_id) 1531 continue; 1532 1533 mirror_top_info = &mirror_adev->psp.xgmi_context.top_info; 1534 for (j = 0; j < mirror_top_info->num_nodes; j++) { 1535 if (mirror_top_info->nodes[j].node_id != src_node_id) 1536 continue; 1537 1538 mirror_top_info->nodes[j].num_hops = dst_num_hops; 1539 /* 1540 * prevent 0 num_links value re-reflection since reflection 1541 * criteria is based on num_hops (direct or indirect). 
1542 * 1543 */ 1544 if (dst_num_links) 1545 mirror_top_info->nodes[j].num_links = dst_num_links; 1546 1547 break; 1548 } 1549 1550 break; 1551 } 1552 1553 amdgpu_put_xgmi_hive(hive); 1554 } 1555 1556 int psp_xgmi_get_topology_info(struct psp_context *psp, 1557 int number_devices, 1558 struct psp_xgmi_topology_info *topology, 1559 bool get_extended_data) 1560 { 1561 struct ta_xgmi_shared_memory *xgmi_cmd; 1562 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; 1563 struct ta_xgmi_cmd_get_topology_info_output *topology_info_output; 1564 int i; 1565 int ret; 1566 1567 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) 1568 return -EINVAL; 1569 1570 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1571 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1572 xgmi_cmd->flag_extend_link_record = get_extended_data; 1573 1574 /* Fill in the shared memory with topology information as input */ 1575 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; 1576 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO; 1577 topology_info_input->num_nodes = number_devices; 1578 1579 for (i = 0; i < topology_info_input->num_nodes; i++) { 1580 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; 1581 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; 1582 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled; 1583 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; 1584 } 1585 1586 /* Invoke xgmi ta to get the topology information */ 1587 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO); 1588 if (ret) 1589 return ret; 1590 1591 /* Read the output topology information from the shared memory */ 1592 topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info; 1593 topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes; 1594 for (i = 0; i < topology->num_nodes; i++) { 1595 /* extended data will either be 0 or equal to non-extended data */ 1596 if (topology_info_output->nodes[i].num_hops) 1597 topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops; 1598 1599 /* non-extended data gets everything here so no need to update */ 1600 if (!get_extended_data) { 1601 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id; 1602 topology->nodes[i].is_sharing_enabled = 1603 topology_info_output->nodes[i].is_sharing_enabled; 1604 topology->nodes[i].sdma_engine = 1605 topology_info_output->nodes[i].sdma_engine; 1606 } 1607 1608 } 1609 1610 /* Invoke xgmi ta again to get the link information */ 1611 if (psp_xgmi_peer_link_info_supported(psp)) { 1612 struct ta_xgmi_cmd_get_peer_link_info *link_info_output; 1613 struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output; 1614 bool requires_reflection = 1615 (psp->xgmi_context.supports_extended_data && 1616 get_extended_data) || 1617 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == 1618 IP_VERSION(13, 0, 6) || 1619 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == 1620 IP_VERSION(13, 0, 14); 1621 bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 : 1622 psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG; 1623 1624 /* popluate the shared output buffer rather than the cmd input buffer 1625 * with node_ids as the input for GET_PEER_LINKS command execution. 1626 * This is required for GET_PEER_LINKS per xgmi ta implementation. 1627 * The same requirement for GET_EXTEND_PEER_LINKS command. 
1628 */ 1629 if (ta_port_num_support) { 1630 link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info; 1631 1632 for (i = 0; i < topology->num_nodes; i++) 1633 link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id; 1634 1635 link_extend_info_output->num_nodes = topology->num_nodes; 1636 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS; 1637 } else { 1638 link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info; 1639 1640 for (i = 0; i < topology->num_nodes; i++) 1641 link_info_output->nodes[i].node_id = topology->nodes[i].node_id; 1642 1643 link_info_output->num_nodes = topology->num_nodes; 1644 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS; 1645 } 1646 1647 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1648 if (ret) 1649 return ret; 1650 1651 for (i = 0; i < topology->num_nodes; i++) { 1652 uint8_t node_num_links = ta_port_num_support ? 1653 link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links; 1654 /* accumulate num_links on extended data */ 1655 if (get_extended_data) { 1656 topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links; 1657 } else { 1658 topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ? 1659 topology->nodes[i].num_links : node_num_links; 1660 } 1661 /* popluate the connected port num info if supported and available */ 1662 if (ta_port_num_support && topology->nodes[i].num_links) { 1663 memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num, 1664 sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM); 1665 } 1666 1667 /* reflect the topology information for bi-directionality */ 1668 if (requires_reflection && topology->nodes[i].num_hops) 1669 psp_xgmi_reflect_topology_info(psp, topology->nodes[i]); 1670 } 1671 } 1672 1673 return 0; 1674 } 1675 1676 int psp_xgmi_set_topology_info(struct psp_context *psp, 1677 int number_devices, 1678 struct psp_xgmi_topology_info *topology) 1679 { 1680 struct ta_xgmi_shared_memory *xgmi_cmd; 1681 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; 1682 int i; 1683 1684 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) 1685 return -EINVAL; 1686 1687 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1688 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1689 1690 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; 1691 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO; 1692 topology_info_input->num_nodes = number_devices; 1693 1694 for (i = 0; i < topology_info_input->num_nodes; i++) { 1695 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; 1696 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; 1697 topology_info_input->nodes[i].is_sharing_enabled = 1; 1698 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; 1699 } 1700 1701 /* Invoke xgmi ta to set topology information */ 1702 return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO); 1703 } 1704 1705 // ras begin 1706 static void psp_ras_ta_check_status(struct psp_context *psp) 1707 { 1708 struct ta_ras_shared_memory *ras_cmd = 1709 (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1710 1711 switch (ras_cmd->ras_status) { 1712 case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP: 1713 dev_warn(psp->adev->dev, 1714 "RAS WARNING: cmd failed due to unsupported ip\n"); 1715 break; 1716 case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ: 1717 
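/*
 * Editor's note -- illustrative sketch only: the statuses handled in this
 * switch usually come back from error-injection requests.  A minimal, assumed
 * caller of the injection path (the ta_ras_trigger_error_input field names
 * follow ta_ras_if.h and should be treated as assumptions here):
 *
 *	struct ta_ras_trigger_error_input inject = {
 *		.block_id          = TA_RAS_BLOCK__UMC,
 *		.inject_error_type = TA_RAS_ERROR__SINGLE_CORRECTABLE,
 *		.address           = err_addr,	// caller-supplied
 *		.value             = 0,
 *	};
 *	// instance_mask picks the GC/SDMA/VCN instances to target; it is folded
 *	// into sub_block_index by psp_ras_trigger_error() later in this file.
 *	ret = psp_ras_trigger_error(&adev->psp, &inject, instance_mask);
 *	if (ret == -EACCES)
 *		dev_warn(adev->dev, "injection into a protected region was denied\n");
 */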
dev_warn(psp->adev->dev, 1718 "RAS WARNING: cmd failed due to unsupported error injection\n"); 1719 break; 1720 case TA_RAS_STATUS__SUCCESS: 1721 break; 1722 case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED: 1723 if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR) 1724 dev_warn(psp->adev->dev, 1725 "RAS WARNING: Inject error to critical region is not allowed\n"); 1726 break; 1727 default: 1728 dev_warn(psp->adev->dev, 1729 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status); 1730 break; 1731 } 1732 } 1733 1734 static int psp_ras_send_cmd(struct psp_context *psp, 1735 enum ras_command cmd_id, void *in, void *out) 1736 { 1737 struct ta_ras_shared_memory *ras_cmd; 1738 uint32_t cmd = cmd_id; 1739 int ret = 0; 1740 1741 if (!in) 1742 return -EINVAL; 1743 1744 mutex_lock(&psp->ras_context.mutex); 1745 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1746 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1747 1748 switch (cmd) { 1749 case TA_RAS_COMMAND__ENABLE_FEATURES: 1750 case TA_RAS_COMMAND__DISABLE_FEATURES: 1751 memcpy(&ras_cmd->ras_in_message, 1752 in, sizeof(ras_cmd->ras_in_message)); 1753 break; 1754 case TA_RAS_COMMAND__TRIGGER_ERROR: 1755 memcpy(&ras_cmd->ras_in_message.trigger_error, 1756 in, sizeof(ras_cmd->ras_in_message.trigger_error)); 1757 break; 1758 case TA_RAS_COMMAND__QUERY_ADDRESS: 1759 memcpy(&ras_cmd->ras_in_message.address, 1760 in, sizeof(ras_cmd->ras_in_message.address)); 1761 break; 1762 default: 1763 dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd); 1764 ret = -EINVAL; 1765 goto err_out; 1766 } 1767 1768 ras_cmd->cmd_id = cmd; 1769 ret = psp_ras_invoke(psp, ras_cmd->cmd_id); 1770 1771 switch (cmd) { 1772 case TA_RAS_COMMAND__TRIGGER_ERROR: 1773 if (!ret && out) 1774 memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status)); 1775 break; 1776 case TA_RAS_COMMAND__QUERY_ADDRESS: 1777 if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status) 1778 ret = -EINVAL; 1779 else if (out) 1780 memcpy(out, 1781 &ras_cmd->ras_out_message.address, 1782 sizeof(ras_cmd->ras_out_message.address)); 1783 break; 1784 default: 1785 break; 1786 } 1787 1788 err_out: 1789 mutex_unlock(&psp->ras_context.mutex); 1790 1791 return ret; 1792 } 1793 1794 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1795 { 1796 struct ta_ras_shared_memory *ras_cmd; 1797 int ret; 1798 1799 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1800 1801 /* 1802 * TODO: bypass the loading in sriov for now 1803 */ 1804 if (amdgpu_sriov_vf(psp->adev)) 1805 return 0; 1806 1807 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context); 1808 1809 if (amdgpu_ras_intr_triggered()) 1810 return ret; 1811 1812 if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) { 1813 dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n"); 1814 return -EINVAL; 1815 } 1816 1817 if (!ret) { 1818 if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) { 1819 dev_warn(psp->adev->dev, "ECC switch disabled\n"); 1820 1821 ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE; 1822 } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag) 1823 dev_warn(psp->adev->dev, 1824 "RAS internal register access blocked\n"); 1825 1826 psp_ras_ta_check_status(psp); 1827 } 1828 1829 return ret; 1830 } 1831 1832 int psp_ras_enable_features(struct psp_context *psp, 1833 union ta_ras_cmd_input *info, bool enable) 1834 { 1835 enum ras_command cmd_id; 1836 int ret; 1837 1838 if (!psp->ras_context.context.initialized || 
!info) 1839 return -EINVAL; 1840 1841 cmd_id = enable ? 1842 TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES; 1843 ret = psp_ras_send_cmd(psp, cmd_id, info, NULL); 1844 if (ret) 1845 return -EINVAL; 1846 1847 return 0; 1848 } 1849 1850 int psp_ras_terminate(struct psp_context *psp) 1851 { 1852 int ret; 1853 1854 /* 1855 * TODO: bypass the terminate in sriov for now 1856 */ 1857 if (amdgpu_sriov_vf(psp->adev)) 1858 return 0; 1859 1860 if (!psp->ras_context.context.initialized) 1861 return 0; 1862 1863 ret = psp_ta_unload(psp, &psp->ras_context.context); 1864 1865 psp->ras_context.context.initialized = false; 1866 1867 mutex_destroy(&psp->ras_context.mutex); 1868 1869 return ret; 1870 } 1871 1872 int psp_ras_initialize(struct psp_context *psp) 1873 { 1874 int ret; 1875 uint32_t boot_cfg = 0xFF; 1876 struct amdgpu_device *adev = psp->adev; 1877 struct ta_ras_shared_memory *ras_cmd; 1878 1879 /* 1880 * TODO: bypass the initialize in sriov for now 1881 */ 1882 if (amdgpu_sriov_vf(adev)) 1883 return 0; 1884 1885 if (!adev->psp.ras_context.context.bin_desc.size_bytes || 1886 !adev->psp.ras_context.context.bin_desc.start_addr) { 1887 dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n"); 1888 return 0; 1889 } 1890 1891 if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) { 1892 /* query GECC enablement status from boot config 1893 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled 1894 */ 1895 ret = psp_boot_config_get(adev, &boot_cfg); 1896 if (ret) 1897 dev_warn(adev->dev, "PSP get boot config failed\n"); 1898 1899 if (boot_cfg == 1 && !adev->ras_default_ecc_enabled && 1900 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) { 1901 dev_warn(adev->dev, "GECC is currently enabled, which may affect performance\n"); 1902 dev_warn(adev->dev, 1903 "To disable GECC, please reboot the system and load the amdgpu driver with the parameter amdgpu_ras_enable=0\n"); 1904 } else { 1905 if ((adev->ras_default_ecc_enabled || amdgpu_ras_enable == 1) && 1906 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) { 1907 if (boot_cfg == 1) { 1908 dev_info(adev->dev, "GECC is enabled\n"); 1909 } else { 1910 /* enable GECC in next boot cycle if it is disabled 1911 * in boot config, or force enable GECC if failed to 1912 * get boot configuration 1913 */ 1914 ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC); 1915 if (ret) 1916 dev_warn(adev->dev, "PSP set boot config failed\n"); 1917 else 1918 dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n"); 1919 } 1920 } else { 1921 if (!boot_cfg) { 1922 if (!adev->ras_default_ecc_enabled && 1923 amdgpu_ras_enable != 1 && 1924 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) 1925 dev_warn(adev->dev, "GECC is disabled, set amdgpu_ras_enable=1 to enable GECC in next boot cycle if needed\n"); 1926 else 1927 dev_info(adev->dev, "GECC is disabled\n"); 1928 } else { 1929 /* disable GECC in next boot cycle if ras is 1930 * disabled by module parameter amdgpu_ras_enable 1931 * and/or amdgpu_ras_mask, or boot_config_get call 1932 * is failed 1933 */ 1934 ret = psp_boot_config_set(adev, 0); 1935 if (ret) 1936 dev_warn(adev->dev, "PSP set boot config failed\n"); 1937 else 1938 dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n"); 1939 } 1940 } 1941 } 1942 } 1943 1944 psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE; 1945 psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1946 1947 if 
(!psp->ras_context.context.mem_context.shared_buf) { 1948 ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context); 1949 if (ret) 1950 return ret; 1951 } 1952 1953 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1954 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1955 1956 if (amdgpu_ras_is_poison_mode_supported(adev)) 1957 ras_cmd->ras_in_message.init_flags.poison_mode_en = 1; 1958 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) 1959 ras_cmd->ras_in_message.init_flags.dgpu_mode = 1; 1960 ras_cmd->ras_in_message.init_flags.xcc_mask = 1961 adev->gfx.xcc_mask; 1962 ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2; 1963 if (adev->gmc.gmc_funcs->query_mem_partition_mode) 1964 ras_cmd->ras_in_message.init_flags.nps_mode = 1965 adev->gmc.gmc_funcs->query_mem_partition_mode(adev); 1966 ras_cmd->ras_in_message.init_flags.active_umc_mask = adev->umc.active_mask; 1967 1968 ret = psp_ta_load(psp, &psp->ras_context.context); 1969 1970 if (!ret && !ras_cmd->ras_status) { 1971 psp->ras_context.context.initialized = true; 1972 mutex_init(&psp->ras_context.mutex); 1973 } else { 1974 if (ras_cmd->ras_status) 1975 dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status); 1976 1977 /* failed to load the RAS TA */ 1978 psp->ras_context.context.initialized = false; 1979 } 1980 1981 return ret; 1982 } 1983 1984 int psp_ras_trigger_error(struct psp_context *psp, 1985 struct ta_ras_trigger_error_input *info, uint32_t instance_mask) 1986 { 1987 struct amdgpu_device *adev = psp->adev; 1988 int ret; 1989 uint32_t dev_mask; 1990 uint32_t ras_status = 0; 1991 1992 if (!psp->ras_context.context.initialized || !info) 1993 return -EINVAL; 1994 1995 switch (info->block_id) { 1996 case TA_RAS_BLOCK__GFX: 1997 dev_mask = GET_MASK(GC, instance_mask); 1998 break; 1999 case TA_RAS_BLOCK__SDMA: 2000 dev_mask = GET_MASK(SDMA0, instance_mask); 2001 break; 2002 case TA_RAS_BLOCK__VCN: 2003 case TA_RAS_BLOCK__JPEG: 2004 dev_mask = GET_MASK(VCN, instance_mask); 2005 break; 2006 default: 2007 dev_mask = instance_mask; 2008 break; 2009 } 2010 2011 /* reuse sub_block_index for backward compatibility */ 2012 dev_mask <<= AMDGPU_RAS_INST_SHIFT; 2013 dev_mask &= AMDGPU_RAS_INST_MASK; 2014 info->sub_block_index |= dev_mask; 2015 2016 ret = psp_ras_send_cmd(psp, 2017 TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status); 2018 if (ret) 2019 return -EINVAL; 2020 2021 /* If err_event_athub occurs, the error injection was successful; however, 2022 * the return status from the TA is no longer reliable 2023 */ 2024 if (amdgpu_ras_intr_triggered()) 2025 return 0; 2026 2027 if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED) 2028 return -EACCES; 2029 else if (ras_status) 2030 return -EINVAL; 2031 2032 return 0; 2033 } 2034 2035 int psp_ras_query_address(struct psp_context *psp, 2036 struct ta_ras_query_address_input *addr_in, 2037 struct ta_ras_query_address_output *addr_out) 2038 { 2039 int ret; 2040 2041 if (!psp->ras_context.context.initialized || 2042 !addr_in || !addr_out) 2043 return -EINVAL; 2044 2045 ret = psp_ras_send_cmd(psp, 2046 TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out); 2047 2048 return ret; 2049 } 2050 // ras end 2051 2052 // HDCP start 2053 static int psp_hdcp_initialize(struct psp_context *psp) 2054 { 2055 int ret; 2056 2057 /* 2058 * TODO: bypass the initialize in sriov for now 2059 */ 2060 if (amdgpu_sriov_vf(psp->adev)) 2061 return 0; 2062 2063 /* bypass hdcp initialization if dmu is harvested */ 2064 if
(!amdgpu_device_has_display_hardware(psp->adev)) 2065 return 0; 2066 2067 if (!psp->hdcp_context.context.bin_desc.size_bytes || 2068 !psp->hdcp_context.context.bin_desc.start_addr) { 2069 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n"); 2070 return 0; 2071 } 2072 2073 psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE; 2074 psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2075 2076 if (!psp->hdcp_context.context.mem_context.shared_buf) { 2077 ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context); 2078 if (ret) 2079 return ret; 2080 } 2081 2082 ret = psp_ta_load(psp, &psp->hdcp_context.context); 2083 if (!ret) { 2084 psp->hdcp_context.context.initialized = true; 2085 mutex_init(&psp->hdcp_context.mutex); 2086 } 2087 2088 return ret; 2089 } 2090 2091 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2092 { 2093 /* 2094 * TODO: bypass the loading in sriov for now 2095 */ 2096 if (amdgpu_sriov_vf(psp->adev)) 2097 return 0; 2098 2099 if (!psp->hdcp_context.context.initialized) 2100 return 0; 2101 2102 return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context); 2103 } 2104 2105 static int psp_hdcp_terminate(struct psp_context *psp) 2106 { 2107 int ret; 2108 2109 /* 2110 * TODO: bypass the terminate in sriov for now 2111 */ 2112 if (amdgpu_sriov_vf(psp->adev)) 2113 return 0; 2114 2115 if (!psp->hdcp_context.context.initialized) 2116 return 0; 2117 2118 ret = psp_ta_unload(psp, &psp->hdcp_context.context); 2119 2120 psp->hdcp_context.context.initialized = false; 2121 2122 return ret; 2123 } 2124 // HDCP end 2125 2126 // DTM start 2127 static int psp_dtm_initialize(struct psp_context *psp) 2128 { 2129 int ret; 2130 2131 /* 2132 * TODO: bypass the initialize in sriov for now 2133 */ 2134 if (amdgpu_sriov_vf(psp->adev)) 2135 return 0; 2136 2137 /* bypass dtm initialization if dmu is harvested */ 2138 if (!amdgpu_device_has_display_hardware(psp->adev)) 2139 return 0; 2140 2141 if (!psp->dtm_context.context.bin_desc.size_bytes || 2142 !psp->dtm_context.context.bin_desc.start_addr) { 2143 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n"); 2144 return 0; 2145 } 2146 2147 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE; 2148 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2149 2150 if (!psp->dtm_context.context.mem_context.shared_buf) { 2151 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context); 2152 if (ret) 2153 return ret; 2154 } 2155 2156 ret = psp_ta_load(psp, &psp->dtm_context.context); 2157 if (!ret) { 2158 psp->dtm_context.context.initialized = true; 2159 mutex_init(&psp->dtm_context.mutex); 2160 } 2161 2162 return ret; 2163 } 2164 2165 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2166 { 2167 /* 2168 * TODO: bypass the loading in sriov for now 2169 */ 2170 if (amdgpu_sriov_vf(psp->adev)) 2171 return 0; 2172 2173 if (!psp->dtm_context.context.initialized) 2174 return 0; 2175 2176 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context); 2177 } 2178 2179 static int psp_dtm_terminate(struct psp_context *psp) 2180 { 2181 int ret; 2182 2183 /* 2184 * TODO: bypass the terminate in sriov for now 2185 */ 2186 if (amdgpu_sriov_vf(psp->adev)) 2187 return 0; 2188 2189 if (!psp->dtm_context.context.initialized) 2190 return 0; 2191 2192 ret = psp_ta_unload(psp, &psp->dtm_context.context); 2193 2194 psp->dtm_context.context.initialized = false; 2195 2196 return ret; 2197 } 2198 // DTM end 
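/*
 * Note on the TA sections above and below: the RAS, HDCP and DTM code above
 * and the RAP and securedisplay code below all follow the same trusted
 * application (TA) lifecycle: bail out under SRIOV, bail out if the optional
 * TA binary was not shipped, size and allocate the shared host<->TA buffer,
 * load the TA via psp_ta_load(), and only then mark the context initialized
 * and set up its mutex; psp_ta_invoke()/psp_ta_unload() reuse that context
 * later.
 *
 * The sketch below is purely illustrative and is not part of the driver;
 * psp_generic_ta_initialize() is a hypothetical name used only to show the
 * shared pattern in one place, operating on the per-TA context embedded in
 * each of the *_context structures used above (assumed here to be
 * struct ta_context, as declared in amdgpu_psp.h).
 *
 *   static int psp_generic_ta_initialize(struct psp_context *psp,
 *                                        struct ta_context *context,
 *                                        uint32_t shared_mem_size)
 *   {
 *       int ret;
 *
 *       if (amdgpu_sriov_vf(psp->adev))
 *           return 0;                       // TAs are bypassed under SRIOV
 *
 *       if (!context->bin_desc.size_bytes ||
 *           !context->bin_desc.start_addr)
 *           return 0;                       // optional TA ucode not available
 *
 *       context->mem_context.shared_mem_size = shared_mem_size;
 *       context->ta_load_type = GFX_CMD_ID_LOAD_TA;
 *
 *       if (!context->mem_context.shared_buf) {
 *           ret = psp_ta_init_shared_buf(psp, &context->mem_context);
 *           if (ret)
 *               return ret;                 // no shared buffer, nothing loaded
 *       }
 *
 *       ret = psp_ta_load(psp, context);    // hand the TA binary to the PSP
 *       if (!ret)
 *           context->initialized = true;
 *       return ret;
 *   }
 */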
2199 2200 // RAP start 2201 static int psp_rap_initialize(struct psp_context *psp) 2202 { 2203 int ret; 2204 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS; 2205 2206 /* 2207 * TODO: bypass the initialize in sriov for now 2208 */ 2209 if (amdgpu_sriov_vf(psp->adev)) 2210 return 0; 2211 2212 if (!psp->rap_context.context.bin_desc.size_bytes || 2213 !psp->rap_context.context.bin_desc.start_addr) { 2214 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n"); 2215 return 0; 2216 } 2217 2218 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE; 2219 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2220 2221 if (!psp->rap_context.context.mem_context.shared_buf) { 2222 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context); 2223 if (ret) 2224 return ret; 2225 } 2226 2227 ret = psp_ta_load(psp, &psp->rap_context.context); 2228 if (!ret) { 2229 psp->rap_context.context.initialized = true; 2230 mutex_init(&psp->rap_context.mutex); 2231 } else 2232 return ret; 2233 2234 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status); 2235 if (ret || status != TA_RAP_STATUS__SUCCESS) { 2236 psp_rap_terminate(psp); 2237 /* free rap shared memory */ 2238 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); 2239 2240 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n", 2241 ret, status); 2242 2243 return ret; 2244 } 2245 2246 return 0; 2247 } 2248 2249 static int psp_rap_terminate(struct psp_context *psp) 2250 { 2251 int ret; 2252 2253 if (!psp->rap_context.context.initialized) 2254 return 0; 2255 2256 ret = psp_ta_unload(psp, &psp->rap_context.context); 2257 2258 psp->rap_context.context.initialized = false; 2259 2260 return ret; 2261 } 2262 2263 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status) 2264 { 2265 struct ta_rap_shared_memory *rap_cmd; 2266 int ret = 0; 2267 2268 if (!psp->rap_context.context.initialized) 2269 return 0; 2270 2271 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE && 2272 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0) 2273 return -EINVAL; 2274 2275 mutex_lock(&psp->rap_context.mutex); 2276 2277 rap_cmd = (struct ta_rap_shared_memory *) 2278 psp->rap_context.context.mem_context.shared_buf; 2279 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory)); 2280 2281 rap_cmd->cmd_id = ta_cmd_id; 2282 rap_cmd->validation_method_id = METHOD_A; 2283 2284 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context); 2285 if (ret) 2286 goto out_unlock; 2287 2288 if (status) 2289 *status = rap_cmd->rap_status; 2290 2291 out_unlock: 2292 mutex_unlock(&psp->rap_context.mutex); 2293 2294 return ret; 2295 } 2296 // RAP end 2297 2298 /* securedisplay start */ 2299 static int psp_securedisplay_initialize(struct psp_context *psp) 2300 { 2301 int ret; 2302 struct ta_securedisplay_cmd *securedisplay_cmd; 2303 2304 /* 2305 * TODO: bypass the initialize in sriov for now 2306 */ 2307 if (amdgpu_sriov_vf(psp->adev)) 2308 return 0; 2309 2310 /* bypass securedisplay initialization if dmu is harvested */ 2311 if (!amdgpu_device_has_display_hardware(psp->adev)) 2312 return 0; 2313 2314 if (!psp->securedisplay_context.context.bin_desc.size_bytes || 2315 !psp->securedisplay_context.context.bin_desc.start_addr) { 2316 dev_info(psp->adev->dev, 2317 "SECUREDISPLAY: optional securedisplay ta ucode is not available\n"); 2318 return 0; 2319 } 2320 2321 psp->securedisplay_context.context.mem_context.shared_mem_size = 2322 PSP_SECUREDISPLAY_SHARED_MEM_SIZE; 2323 
psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2324 2325 if (!psp->securedisplay_context.context.initialized) { 2326 ret = psp_ta_init_shared_buf(psp, 2327 &psp->securedisplay_context.context.mem_context); 2328 if (ret) 2329 return ret; 2330 } 2331 2332 ret = psp_ta_load(psp, &psp->securedisplay_context.context); 2333 if (!ret) { 2334 psp->securedisplay_context.context.initialized = true; 2335 mutex_init(&psp->securedisplay_context.mutex); 2336 } else 2337 return ret; 2338 2339 mutex_lock(&psp->securedisplay_context.mutex); 2340 2341 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, 2342 TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2343 2344 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2345 2346 mutex_unlock(&psp->securedisplay_context.mutex); 2347 2348 if (ret) { 2349 psp_securedisplay_terminate(psp); 2350 /* free securedisplay shared memory */ 2351 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); 2352 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n"); 2353 return -EINVAL; 2354 } 2355 2356 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { 2357 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); 2358 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n", 2359 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret); 2360 /* don't try again */ 2361 psp->securedisplay_context.context.bin_desc.size_bytes = 0; 2362 } 2363 2364 return 0; 2365 } 2366 2367 static int psp_securedisplay_terminate(struct psp_context *psp) 2368 { 2369 int ret; 2370 2371 /* 2372 * TODO:bypass the terminate in sriov for now 2373 */ 2374 if (amdgpu_sriov_vf(psp->adev)) 2375 return 0; 2376 2377 if (!psp->securedisplay_context.context.initialized) 2378 return 0; 2379 2380 ret = psp_ta_unload(psp, &psp->securedisplay_context.context); 2381 2382 psp->securedisplay_context.context.initialized = false; 2383 2384 return ret; 2385 } 2386 2387 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2388 { 2389 int ret; 2390 2391 if (!psp->securedisplay_context.context.initialized) 2392 return -EINVAL; 2393 2394 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA && 2395 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC && 2396 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2) 2397 return -EINVAL; 2398 2399 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context); 2400 2401 return ret; 2402 } 2403 /* SECUREDISPLAY end */ 2404 2405 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev) 2406 { 2407 struct psp_context *psp = &adev->psp; 2408 int ret = 0; 2409 2410 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL) 2411 ret = psp->funcs->wait_for_bootloader(psp); 2412 2413 return ret; 2414 } 2415 2416 bool amdgpu_psp_get_ras_capability(struct psp_context *psp) 2417 { 2418 if (psp->funcs && 2419 psp->funcs->get_ras_capability) { 2420 return psp->funcs->get_ras_capability(psp); 2421 } else { 2422 return false; 2423 } 2424 } 2425 2426 bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev) 2427 { 2428 struct psp_context *psp = &adev->psp; 2429 2430 if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) 2431 return false; 2432 2433 if (psp->funcs && psp->funcs->is_reload_needed) 2434 return psp->funcs->is_reload_needed(psp); 2435 2436 return false; 2437 } 2438 2439 static void psp_update_gpu_addresses(struct amdgpu_device *adev) 2440 { 2441 struct psp_context *psp = &adev->psp; 2442 2443 
if (psp->cmd_buf_bo && psp->cmd_buf_mem) { 2444 psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo); 2445 psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo); 2446 psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo); 2447 } 2448 if (adev->firmware.rbuf && psp->km_ring.ring_mem) 2449 psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf); 2450 } 2451 2452 static int psp_hw_start(struct psp_context *psp) 2453 { 2454 struct amdgpu_device *adev = psp->adev; 2455 int ret; 2456 2457 if (amdgpu_virt_xgmi_migrate_enabled(adev)) 2458 psp_update_gpu_addresses(adev); 2459 2460 if (!amdgpu_sriov_vf(adev)) { 2461 if ((is_psp_fw_valid(psp->kdb)) && 2462 (psp->funcs->bootloader_load_kdb != NULL)) { 2463 ret = psp_bootloader_load_kdb(psp); 2464 if (ret) { 2465 dev_err(adev->dev, "PSP load kdb failed!\n"); 2466 return ret; 2467 } 2468 } 2469 2470 if ((is_psp_fw_valid(psp->spl)) && 2471 (psp->funcs->bootloader_load_spl != NULL)) { 2472 ret = psp_bootloader_load_spl(psp); 2473 if (ret) { 2474 dev_err(adev->dev, "PSP load spl failed!\n"); 2475 return ret; 2476 } 2477 } 2478 2479 if ((is_psp_fw_valid(psp->sys)) && 2480 (psp->funcs->bootloader_load_sysdrv != NULL)) { 2481 ret = psp_bootloader_load_sysdrv(psp); 2482 if (ret) { 2483 dev_err(adev->dev, "PSP load sys drv failed!\n"); 2484 return ret; 2485 } 2486 } 2487 2488 if ((is_psp_fw_valid(psp->soc_drv)) && 2489 (psp->funcs->bootloader_load_soc_drv != NULL)) { 2490 ret = psp_bootloader_load_soc_drv(psp); 2491 if (ret) { 2492 dev_err(adev->dev, "PSP load soc drv failed!\n"); 2493 return ret; 2494 } 2495 } 2496 2497 if ((is_psp_fw_valid(psp->intf_drv)) && 2498 (psp->funcs->bootloader_load_intf_drv != NULL)) { 2499 ret = psp_bootloader_load_intf_drv(psp); 2500 if (ret) { 2501 dev_err(adev->dev, "PSP load intf drv failed!\n"); 2502 return ret; 2503 } 2504 } 2505 2506 if ((is_psp_fw_valid(psp->dbg_drv)) && 2507 (psp->funcs->bootloader_load_dbg_drv != NULL)) { 2508 ret = psp_bootloader_load_dbg_drv(psp); 2509 if (ret) { 2510 dev_err(adev->dev, "PSP load dbg drv failed!\n"); 2511 return ret; 2512 } 2513 } 2514 2515 if ((is_psp_fw_valid(psp->ras_drv)) && 2516 (psp->funcs->bootloader_load_ras_drv != NULL)) { 2517 ret = psp_bootloader_load_ras_drv(psp); 2518 if (ret) { 2519 dev_err(adev->dev, "PSP load ras_drv failed!\n"); 2520 return ret; 2521 } 2522 } 2523 2524 if ((is_psp_fw_valid(psp->ipkeymgr_drv)) && 2525 (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) { 2526 ret = psp_bootloader_load_ipkeymgr_drv(psp); 2527 if (ret) { 2528 dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n"); 2529 return ret; 2530 } 2531 } 2532 2533 if ((is_psp_fw_valid(psp->spdm_drv)) && 2534 (psp->funcs->bootloader_load_spdm_drv != NULL)) { 2535 ret = psp_bootloader_load_spdm_drv(psp); 2536 if (ret) { 2537 dev_err(adev->dev, "PSP load spdm_drv failed!\n"); 2538 return ret; 2539 } 2540 } 2541 2542 if ((is_psp_fw_valid(psp->sos)) && 2543 (psp->funcs->bootloader_load_sos != NULL)) { 2544 ret = psp_bootloader_load_sos(psp); 2545 if (ret) { 2546 dev_err(adev->dev, "PSP load sos failed!\n"); 2547 return ret; 2548 } 2549 } 2550 } 2551 2552 ret = psp_ring_create(psp, PSP_RING_TYPE__KM); 2553 if (ret) { 2554 dev_err(adev->dev, "PSP create ring failed!\n"); 2555 return ret; 2556 } 2557 2558 if (!amdgpu_in_reset(adev) && !adev->in_suspend) { 2559 ret = psp_update_fw_reservation(psp); 2560 if (ret) { 2561 dev_err(adev->dev, "update fw reservation failed!\n"); 2562 return ret; 2563 } 2564 } 2565 2566 if (amdgpu_sriov_vf(adev) && 
amdgpu_in_reset(adev)) 2567 goto skip_pin_bo; 2568 2569 if (!psp->boot_time_tmr || psp->autoload_supported) { 2570 ret = psp_tmr_init(psp); 2571 if (ret) { 2572 dev_err(adev->dev, "PSP tmr init failed!\n"); 2573 return ret; 2574 } 2575 } 2576 2577 skip_pin_bo: 2578 /* 2579 * For ASICs with DF Cstate management centralized 2580 * to PMFW, TMR setup should be performed after PMFW 2581 * loaded and before other non-psp firmware loaded. 2582 */ 2583 if (psp->pmfw_centralized_cstate_management) { 2584 ret = psp_load_smu_fw(psp); 2585 if (ret) 2586 return ret; 2587 } 2588 2589 if (!psp->boot_time_tmr || !psp->autoload_supported) { 2590 ret = psp_tmr_load(psp); 2591 if (ret) { 2592 dev_err(adev->dev, "PSP load tmr failed!\n"); 2593 return ret; 2594 } 2595 } 2596 2597 return 0; 2598 } 2599 2600 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, 2601 enum psp_gfx_fw_type *type) 2602 { 2603 switch (ucode->ucode_id) { 2604 case AMDGPU_UCODE_ID_CAP: 2605 *type = GFX_FW_TYPE_CAP; 2606 break; 2607 case AMDGPU_UCODE_ID_SDMA0: 2608 *type = GFX_FW_TYPE_SDMA0; 2609 break; 2610 case AMDGPU_UCODE_ID_SDMA1: 2611 *type = GFX_FW_TYPE_SDMA1; 2612 break; 2613 case AMDGPU_UCODE_ID_SDMA2: 2614 *type = GFX_FW_TYPE_SDMA2; 2615 break; 2616 case AMDGPU_UCODE_ID_SDMA3: 2617 *type = GFX_FW_TYPE_SDMA3; 2618 break; 2619 case AMDGPU_UCODE_ID_SDMA4: 2620 *type = GFX_FW_TYPE_SDMA4; 2621 break; 2622 case AMDGPU_UCODE_ID_SDMA5: 2623 *type = GFX_FW_TYPE_SDMA5; 2624 break; 2625 case AMDGPU_UCODE_ID_SDMA6: 2626 *type = GFX_FW_TYPE_SDMA6; 2627 break; 2628 case AMDGPU_UCODE_ID_SDMA7: 2629 *type = GFX_FW_TYPE_SDMA7; 2630 break; 2631 case AMDGPU_UCODE_ID_CP_MES: 2632 *type = GFX_FW_TYPE_CP_MES; 2633 break; 2634 case AMDGPU_UCODE_ID_CP_MES_DATA: 2635 *type = GFX_FW_TYPE_MES_STACK; 2636 break; 2637 case AMDGPU_UCODE_ID_CP_MES1: 2638 *type = GFX_FW_TYPE_CP_MES_KIQ; 2639 break; 2640 case AMDGPU_UCODE_ID_CP_MES1_DATA: 2641 *type = GFX_FW_TYPE_MES_KIQ_STACK; 2642 break; 2643 case AMDGPU_UCODE_ID_CP_CE: 2644 *type = GFX_FW_TYPE_CP_CE; 2645 break; 2646 case AMDGPU_UCODE_ID_CP_PFP: 2647 *type = GFX_FW_TYPE_CP_PFP; 2648 break; 2649 case AMDGPU_UCODE_ID_CP_ME: 2650 *type = GFX_FW_TYPE_CP_ME; 2651 break; 2652 case AMDGPU_UCODE_ID_CP_MEC1: 2653 *type = GFX_FW_TYPE_CP_MEC; 2654 break; 2655 case AMDGPU_UCODE_ID_CP_MEC1_JT: 2656 *type = GFX_FW_TYPE_CP_MEC_ME1; 2657 break; 2658 case AMDGPU_UCODE_ID_CP_MEC2: 2659 *type = GFX_FW_TYPE_CP_MEC; 2660 break; 2661 case AMDGPU_UCODE_ID_CP_MEC2_JT: 2662 *type = GFX_FW_TYPE_CP_MEC_ME2; 2663 break; 2664 case AMDGPU_UCODE_ID_RLC_P: 2665 *type = GFX_FW_TYPE_RLC_P; 2666 break; 2667 case AMDGPU_UCODE_ID_RLC_V: 2668 *type = GFX_FW_TYPE_RLC_V; 2669 break; 2670 case AMDGPU_UCODE_ID_RLC_G: 2671 *type = GFX_FW_TYPE_RLC_G; 2672 break; 2673 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: 2674 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL; 2675 break; 2676 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: 2677 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM; 2678 break; 2679 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: 2680 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; 2681 break; 2682 case AMDGPU_UCODE_ID_RLC_IRAM: 2683 *type = GFX_FW_TYPE_RLC_IRAM; 2684 break; 2685 case AMDGPU_UCODE_ID_RLC_DRAM: 2686 *type = GFX_FW_TYPE_RLC_DRAM_BOOT; 2687 break; 2688 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS: 2689 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS; 2690 break; 2691 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS: 2692 *type = GFX_FW_TYPE_SE0_TAP_DELAYS; 2693 break; 2694 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS: 2695 *type = GFX_FW_TYPE_SE1_TAP_DELAYS; 
2696 break; 2697 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS: 2698 *type = GFX_FW_TYPE_SE2_TAP_DELAYS; 2699 break; 2700 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS: 2701 *type = GFX_FW_TYPE_SE3_TAP_DELAYS; 2702 break; 2703 case AMDGPU_UCODE_ID_SMC: 2704 *type = GFX_FW_TYPE_SMU; 2705 break; 2706 case AMDGPU_UCODE_ID_PPTABLE: 2707 *type = GFX_FW_TYPE_PPTABLE; 2708 break; 2709 case AMDGPU_UCODE_ID_UVD: 2710 *type = GFX_FW_TYPE_UVD; 2711 break; 2712 case AMDGPU_UCODE_ID_UVD1: 2713 *type = GFX_FW_TYPE_UVD1; 2714 break; 2715 case AMDGPU_UCODE_ID_VCE: 2716 *type = GFX_FW_TYPE_VCE; 2717 break; 2718 case AMDGPU_UCODE_ID_VCN: 2719 *type = GFX_FW_TYPE_VCN; 2720 break; 2721 case AMDGPU_UCODE_ID_VCN1: 2722 *type = GFX_FW_TYPE_VCN1; 2723 break; 2724 case AMDGPU_UCODE_ID_DMCU_ERAM: 2725 *type = GFX_FW_TYPE_DMCU_ERAM; 2726 break; 2727 case AMDGPU_UCODE_ID_DMCU_INTV: 2728 *type = GFX_FW_TYPE_DMCU_ISR; 2729 break; 2730 case AMDGPU_UCODE_ID_VCN0_RAM: 2731 *type = GFX_FW_TYPE_VCN0_RAM; 2732 break; 2733 case AMDGPU_UCODE_ID_VCN1_RAM: 2734 *type = GFX_FW_TYPE_VCN1_RAM; 2735 break; 2736 case AMDGPU_UCODE_ID_DMCUB: 2737 *type = GFX_FW_TYPE_DMUB; 2738 break; 2739 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0: 2740 case AMDGPU_UCODE_ID_SDMA_RS64: 2741 *type = GFX_FW_TYPE_SDMA_UCODE_TH0; 2742 break; 2743 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1: 2744 *type = GFX_FW_TYPE_SDMA_UCODE_TH1; 2745 break; 2746 case AMDGPU_UCODE_ID_IMU_I: 2747 *type = GFX_FW_TYPE_IMU_I; 2748 break; 2749 case AMDGPU_UCODE_ID_IMU_D: 2750 *type = GFX_FW_TYPE_IMU_D; 2751 break; 2752 case AMDGPU_UCODE_ID_CP_RS64_PFP: 2753 *type = GFX_FW_TYPE_RS64_PFP; 2754 break; 2755 case AMDGPU_UCODE_ID_CP_RS64_ME: 2756 *type = GFX_FW_TYPE_RS64_ME; 2757 break; 2758 case AMDGPU_UCODE_ID_CP_RS64_MEC: 2759 *type = GFX_FW_TYPE_RS64_MEC; 2760 break; 2761 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK: 2762 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK; 2763 break; 2764 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK: 2765 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK; 2766 break; 2767 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK: 2768 *type = GFX_FW_TYPE_RS64_ME_P0_STACK; 2769 break; 2770 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK: 2771 *type = GFX_FW_TYPE_RS64_ME_P1_STACK; 2772 break; 2773 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK: 2774 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK; 2775 break; 2776 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK: 2777 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK; 2778 break; 2779 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK: 2780 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK; 2781 break; 2782 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK: 2783 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK; 2784 break; 2785 case AMDGPU_UCODE_ID_VPE_CTX: 2786 *type = GFX_FW_TYPE_VPEC_FW1; 2787 break; 2788 case AMDGPU_UCODE_ID_VPE_CTL: 2789 *type = GFX_FW_TYPE_VPEC_FW2; 2790 break; 2791 case AMDGPU_UCODE_ID_VPE: 2792 *type = GFX_FW_TYPE_VPE; 2793 break; 2794 case AMDGPU_UCODE_ID_UMSCH_MM_UCODE: 2795 *type = GFX_FW_TYPE_UMSCH_UCODE; 2796 break; 2797 case AMDGPU_UCODE_ID_UMSCH_MM_DATA: 2798 *type = GFX_FW_TYPE_UMSCH_DATA; 2799 break; 2800 case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER: 2801 *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER; 2802 break; 2803 case AMDGPU_UCODE_ID_P2S_TABLE: 2804 *type = GFX_FW_TYPE_P2S_TABLE; 2805 break; 2806 case AMDGPU_UCODE_ID_JPEG_RAM: 2807 *type = GFX_FW_TYPE_JPEG_RAM; 2808 break; 2809 case AMDGPU_UCODE_ID_ISP: 2810 *type = GFX_FW_TYPE_ISP; 2811 break; 2812 case AMDGPU_UCODE_ID_MAXIMUM: 2813 default: 2814 return -EINVAL; 2815 } 2816 2817 return 0; 2818 } 2819 2820 static void psp_print_fw_hdr(struct psp_context *psp, 2821 struct 
amdgpu_firmware_info *ucode) 2822 { 2823 struct amdgpu_device *adev = psp->adev; 2824 struct common_firmware_header *hdr; 2825 2826 switch (ucode->ucode_id) { 2827 case AMDGPU_UCODE_ID_SDMA0: 2828 case AMDGPU_UCODE_ID_SDMA1: 2829 case AMDGPU_UCODE_ID_SDMA2: 2830 case AMDGPU_UCODE_ID_SDMA3: 2831 case AMDGPU_UCODE_ID_SDMA4: 2832 case AMDGPU_UCODE_ID_SDMA5: 2833 case AMDGPU_UCODE_ID_SDMA6: 2834 case AMDGPU_UCODE_ID_SDMA7: 2835 hdr = (struct common_firmware_header *) 2836 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; 2837 amdgpu_ucode_print_sdma_hdr(hdr); 2838 break; 2839 case AMDGPU_UCODE_ID_CP_CE: 2840 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data; 2841 amdgpu_ucode_print_gfx_hdr(hdr); 2842 break; 2843 case AMDGPU_UCODE_ID_CP_PFP: 2844 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data; 2845 amdgpu_ucode_print_gfx_hdr(hdr); 2846 break; 2847 case AMDGPU_UCODE_ID_CP_ME: 2848 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data; 2849 amdgpu_ucode_print_gfx_hdr(hdr); 2850 break; 2851 case AMDGPU_UCODE_ID_CP_MEC1: 2852 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data; 2853 amdgpu_ucode_print_gfx_hdr(hdr); 2854 break; 2855 case AMDGPU_UCODE_ID_RLC_G: 2856 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data; 2857 amdgpu_ucode_print_rlc_hdr(hdr); 2858 break; 2859 case AMDGPU_UCODE_ID_SMC: 2860 hdr = (struct common_firmware_header *)adev->pm.fw->data; 2861 amdgpu_ucode_print_smc_hdr(hdr); 2862 break; 2863 default: 2864 break; 2865 } 2866 } 2867 2868 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp, 2869 struct amdgpu_firmware_info *ucode, 2870 struct psp_gfx_cmd_resp *cmd) 2871 { 2872 int ret; 2873 uint64_t fw_mem_mc_addr = ucode->mc_addr; 2874 2875 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 2876 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); 2877 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); 2878 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; 2879 2880 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); 2881 if (ret) 2882 dev_err(psp->adev->dev, "Unknown firmware type\n"); 2883 2884 return ret; 2885 } 2886 2887 int psp_execute_ip_fw_load(struct psp_context *psp, 2888 struct amdgpu_firmware_info *ucode) 2889 { 2890 int ret = 0; 2891 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 2892 2893 ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd); 2894 if (!ret) { 2895 ret = psp_cmd_submit_buf(psp, ucode, cmd, 2896 psp->fence_buf_mc_addr); 2897 } 2898 2899 release_psp_cmd_buf(psp); 2900 2901 return ret; 2902 } 2903 2904 static int psp_load_p2s_table(struct psp_context *psp) 2905 { 2906 int ret; 2907 struct amdgpu_device *adev = psp->adev; 2908 struct amdgpu_firmware_info *ucode = 2909 &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE]; 2910 2911 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || 2912 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO))) 2913 return 0; 2914 2915 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || 2916 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) { 2917 uint32_t supp_vers = adev->flags & AMD_IS_APU ? 
0x0036013D : 2918 0x0036003C; 2919 if (psp->sos.fw_version < supp_vers) 2920 return 0; 2921 } 2922 2923 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2924 return 0; 2925 2926 ret = psp_execute_ip_fw_load(psp, ucode); 2927 2928 return ret; 2929 } 2930 2931 static int psp_load_smu_fw(struct psp_context *psp) 2932 { 2933 int ret; 2934 struct amdgpu_device *adev = psp->adev; 2935 struct amdgpu_firmware_info *ucode = 2936 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 2937 struct amdgpu_ras *ras = psp->ras_context.ras; 2938 2939 /* 2940 * Skip SMU FW reloading in case of using BACO for runpm only, 2941 * as SMU is always alive. 2942 */ 2943 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || 2944 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO))) 2945 return 0; 2946 2947 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2948 return 0; 2949 2950 if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled && 2951 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) || 2952 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) { 2953 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); 2954 if (ret) 2955 dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n"); 2956 } 2957 2958 ret = psp_execute_ip_fw_load(psp, ucode); 2959 2960 if (ret) 2961 dev_err(adev->dev, "PSP load smu failed!\n"); 2962 2963 return ret; 2964 } 2965 2966 static bool fw_load_skip_check(struct psp_context *psp, 2967 struct amdgpu_firmware_info *ucode) 2968 { 2969 if (!ucode->fw || !ucode->ucode_size) 2970 return true; 2971 2972 if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE) 2973 return true; 2974 2975 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 2976 (psp_smu_reload_quirk(psp) || 2977 psp->autoload_supported || 2978 psp->pmfw_centralized_cstate_management)) 2979 return true; 2980 2981 if (amdgpu_sriov_vf(psp->adev) && 2982 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id)) 2983 return true; 2984 2985 if (psp->autoload_supported && 2986 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || 2987 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) 2988 /* skip mec JT when autoload is enabled */ 2989 return true; 2990 2991 return false; 2992 } 2993 2994 int psp_load_fw_list(struct psp_context *psp, 2995 struct amdgpu_firmware_info **ucode_list, int ucode_count) 2996 { 2997 int ret = 0, i; 2998 struct amdgpu_firmware_info *ucode; 2999 3000 for (i = 0; i < ucode_count; ++i) { 3001 ucode = ucode_list[i]; 3002 psp_print_fw_hdr(psp, ucode); 3003 ret = psp_execute_ip_fw_load(psp, ucode); 3004 if (ret) 3005 return ret; 3006 } 3007 return ret; 3008 } 3009 3010 static int psp_load_non_psp_fw(struct psp_context *psp) 3011 { 3012 int i, ret; 3013 struct amdgpu_firmware_info *ucode; 3014 struct amdgpu_device *adev = psp->adev; 3015 3016 if (psp->autoload_supported && 3017 !psp->pmfw_centralized_cstate_management) { 3018 ret = psp_load_smu_fw(psp); 3019 if (ret) 3020 return ret; 3021 } 3022 3023 /* Load P2S table first if it's available */ 3024 psp_load_p2s_table(psp); 3025 3026 for (i = 0; i < adev->firmware.max_ucodes; i++) { 3027 ucode = &adev->firmware.ucode[i]; 3028 3029 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 3030 !fw_load_skip_check(psp, ucode)) { 3031 ret = psp_load_smu_fw(psp); 3032 if (ret) 3033 return ret; 3034 continue; 3035 } 3036 3037 if (fw_load_skip_check(psp, ucode)) 3038 continue; 3039 3040 if (psp->autoload_supported && 3041 (amdgpu_ip_version(adev, MP0_HWIP, 0) == 3042 IP_VERSION(11, 0, 7) || 3043 amdgpu_ip_version(adev, MP0_HWIP, 0) == 3044 IP_VERSION(11, 0, 11) || 3045 
amdgpu_ip_version(adev, MP0_HWIP, 0) == 3046 IP_VERSION(11, 0, 12)) && 3047 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 || 3048 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 || 3049 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3)) 3050 /* PSP only receives one SDMA fw for sienna_cichlid, 3051 * as all four sdma fws are the same 3052 */ 3053 continue; 3054 3055 psp_print_fw_hdr(psp, ucode); 3056 3057 ret = psp_execute_ip_fw_load(psp, ucode); 3058 if (ret) 3059 return ret; 3060 3061 /* Start rlc autoload after psp has received all the gfx firmware */ 3062 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ? 3063 adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) { 3064 ret = psp_rlc_autoload_start(psp); 3065 if (ret) { 3066 dev_err(adev->dev, "Failed to start rlc autoload\n"); 3067 return ret; 3068 } 3069 } 3070 } 3071 3072 return 0; 3073 } 3074 3075 static int psp_load_fw(struct amdgpu_device *adev) 3076 { 3077 int ret; 3078 struct psp_context *psp = &adev->psp; 3079 3080 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { 3081 /* should not destroy ring, only stop */ 3082 psp_ring_stop(psp, PSP_RING_TYPE__KM); 3083 } else { 3084 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE); 3085 3086 ret = psp_ring_init(psp, PSP_RING_TYPE__KM); 3087 if (ret) { 3088 dev_err(adev->dev, "PSP ring init failed!\n"); 3089 goto failed; 3090 } 3091 } 3092 3093 ret = psp_hw_start(psp); 3094 if (ret) 3095 goto failed; 3096 3097 ret = psp_load_non_psp_fw(psp); 3098 if (ret) 3099 goto failed1; 3100 3101 ret = psp_asd_initialize(psp); 3102 if (ret) { 3103 dev_err(adev->dev, "PSP load asd failed!\n"); 3104 goto failed1; 3105 } 3106 3107 ret = psp_rl_load(adev); 3108 if (ret) { 3109 dev_err(adev->dev, "PSP load RL failed!\n"); 3110 goto failed1; 3111 } 3112 3113 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { 3114 if (adev->gmc.xgmi.num_physical_nodes > 1) { 3115 ret = psp_xgmi_initialize(psp, false, true); 3116 /* Warn about an XGMI session initialization failure 3117 * instead of stopping driver initialization 3118 */ 3119 if (ret) 3120 dev_err(psp->adev->dev, 3121 "XGMI: Failed to initialize XGMI session\n"); 3122 } 3123 } 3124 3125 if (psp->ta_fw) { 3126 ret = psp_ras_initialize(psp); 3127 if (ret) 3128 dev_err(psp->adev->dev, 3129 "RAS: Failed to initialize RAS\n"); 3130 3131 ret = psp_hdcp_initialize(psp); 3132 if (ret) 3133 dev_err(psp->adev->dev, 3134 "HDCP: Failed to initialize HDCP\n"); 3135 3136 ret = psp_dtm_initialize(psp); 3137 if (ret) 3138 dev_err(psp->adev->dev, 3139 "DTM: Failed to initialize DTM\n"); 3140 3141 ret = psp_rap_initialize(psp); 3142 if (ret) 3143 dev_err(psp->adev->dev, 3144 "RAP: Failed to initialize RAP\n"); 3145 3146 ret = psp_securedisplay_initialize(psp); 3147 if (ret) 3148 dev_err(psp->adev->dev, 3149 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 3150 } 3151 3152 return 0; 3153 3154 failed1: 3155 psp_free_shared_bufs(psp); 3156 failed: 3157 /* 3158 * all cleanup jobs (xgmi terminate, ras terminate, 3159 * ring destroy, cmd/fence/fw buffers destroy, 3160 * psp->cmd destroy) are delayed to psp_hw_fini 3161 */ 3162 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 3163 return ret; 3164 } 3165 3166 static int psp_hw_init(struct amdgpu_ip_block *ip_block) 3167 { 3168 int ret; 3169 struct amdgpu_device *adev = ip_block->adev; 3170 3171 mutex_lock(&adev->firmware.mutex); 3172 3173 ret = amdgpu_ucode_init_bo(adev); 3174 if (ret) 3175 goto failed; 3176 3177 ret = psp_load_fw(adev); 3178 if (ret) { 3179 dev_err(adev->dev, "PSP firmware loading failed\n"); 3180 goto failed; 3181 } 3182
3183 mutex_unlock(&adev->firmware.mutex); 3184 return 0; 3185 3186 failed: 3187 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT; 3188 mutex_unlock(&adev->firmware.mutex); 3189 return -EINVAL; 3190 } 3191 3192 static int psp_hw_fini(struct amdgpu_ip_block *ip_block) 3193 { 3194 struct amdgpu_device *adev = ip_block->adev; 3195 struct psp_context *psp = &adev->psp; 3196 3197 if (psp->ta_fw) { 3198 psp_ras_terminate(psp); 3199 psp_securedisplay_terminate(psp); 3200 psp_rap_terminate(psp); 3201 psp_dtm_terminate(psp); 3202 psp_hdcp_terminate(psp); 3203 3204 if (adev->gmc.xgmi.num_physical_nodes > 1) 3205 psp_xgmi_terminate(psp); 3206 } 3207 3208 psp_asd_terminate(psp); 3209 psp_tmr_terminate(psp); 3210 3211 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 3212 3213 return 0; 3214 } 3215 3216 static int psp_suspend(struct amdgpu_ip_block *ip_block) 3217 { 3218 int ret = 0; 3219 struct amdgpu_device *adev = ip_block->adev; 3220 struct psp_context *psp = &adev->psp; 3221 3222 if (adev->gmc.xgmi.num_physical_nodes > 1 && 3223 psp->xgmi_context.context.initialized) { 3224 ret = psp_xgmi_terminate(psp); 3225 if (ret) { 3226 dev_err(adev->dev, "Failed to terminate xgmi ta\n"); 3227 goto out; 3228 } 3229 } 3230 3231 if (psp->ta_fw) { 3232 ret = psp_ras_terminate(psp); 3233 if (ret) { 3234 dev_err(adev->dev, "Failed to terminate ras ta\n"); 3235 goto out; 3236 } 3237 ret = psp_hdcp_terminate(psp); 3238 if (ret) { 3239 dev_err(adev->dev, "Failed to terminate hdcp ta\n"); 3240 goto out; 3241 } 3242 ret = psp_dtm_terminate(psp); 3243 if (ret) { 3244 dev_err(adev->dev, "Failed to terminate dtm ta\n"); 3245 goto out; 3246 } 3247 ret = psp_rap_terminate(psp); 3248 if (ret) { 3249 dev_err(adev->dev, "Failed to terminate rap ta\n"); 3250 goto out; 3251 } 3252 ret = psp_securedisplay_terminate(psp); 3253 if (ret) { 3254 dev_err(adev->dev, "Failed to terminate securedisplay ta\n"); 3255 goto out; 3256 } 3257 } 3258 3259 ret = psp_asd_terminate(psp); 3260 if (ret) { 3261 dev_err(adev->dev, "Failed to terminate asd\n"); 3262 goto out; 3263 } 3264 3265 ret = psp_tmr_terminate(psp); 3266 if (ret) { 3267 dev_err(adev->dev, "Failed to terminate tmr\n"); 3268 goto out; 3269 } 3270 3271 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); 3272 if (ret) 3273 dev_err(adev->dev, "PSP ring stop failed\n"); 3274 3275 out: 3276 return ret; 3277 } 3278 3279 static int psp_resume(struct amdgpu_ip_block *ip_block) 3280 { 3281 int ret; 3282 struct amdgpu_device *adev = ip_block->adev; 3283 struct psp_context *psp = &adev->psp; 3284 3285 dev_info(adev->dev, "PSP is resuming...\n"); 3286 3287 if (psp->mem_train_ctx.enable_mem_training) { 3288 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); 3289 if (ret) { 3290 dev_err(adev->dev, "Failed to process memory training!\n"); 3291 return ret; 3292 } 3293 } 3294 3295 mutex_lock(&adev->firmware.mutex); 3296 3297 ret = amdgpu_ucode_init_bo(adev); 3298 if (ret) 3299 goto failed; 3300 3301 ret = psp_hw_start(psp); 3302 if (ret) 3303 goto failed; 3304 3305 ret = psp_load_non_psp_fw(psp); 3306 if (ret) 3307 goto failed; 3308 3309 ret = psp_asd_initialize(psp); 3310 if (ret) { 3311 dev_err(adev->dev, "PSP load asd failed!\n"); 3312 goto failed; 3313 } 3314 3315 ret = psp_rl_load(adev); 3316 if (ret) { 3317 dev_err(adev->dev, "PSP load RL failed!\n"); 3318 goto failed; 3319 } 3320 3321 if (adev->gmc.xgmi.num_physical_nodes > 1) { 3322 ret = psp_xgmi_initialize(psp, false, true); 3323 /* Warn about an XGMI session initialization failure 3324 * instead of stopping driver initialization 3325 */ 3326 if (ret) 3327
dev_err(psp->adev->dev, 3328 "XGMI: Failed to initialize XGMI session\n"); 3329 } 3330 3331 if (psp->ta_fw) { 3332 ret = psp_ras_initialize(psp); 3333 if (ret) 3334 dev_err(psp->adev->dev, 3335 "RAS: Failed to initialize RAS\n"); 3336 3337 ret = psp_hdcp_initialize(psp); 3338 if (ret) 3339 dev_err(psp->adev->dev, 3340 "HDCP: Failed to initialize HDCP\n"); 3341 3342 ret = psp_dtm_initialize(psp); 3343 if (ret) 3344 dev_err(psp->adev->dev, 3345 "DTM: Failed to initialize DTM\n"); 3346 3347 ret = psp_rap_initialize(psp); 3348 if (ret) 3349 dev_err(psp->adev->dev, 3350 "RAP: Failed to initialize RAP\n"); 3351 3352 ret = psp_securedisplay_initialize(psp); 3353 if (ret) 3354 dev_err(psp->adev->dev, 3355 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 3356 } 3357 3358 mutex_unlock(&adev->firmware.mutex); 3359 3360 return 0; 3361 3362 failed: 3363 dev_err(adev->dev, "PSP resume failed\n"); 3364 mutex_unlock(&adev->firmware.mutex); 3365 return ret; 3366 } 3367 3368 int psp_gpu_reset(struct amdgpu_device *adev) 3369 { 3370 int ret; 3371 3372 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 3373 return 0; 3374 3375 mutex_lock(&adev->psp.mutex); 3376 ret = psp_mode1_reset(&adev->psp); 3377 mutex_unlock(&adev->psp.mutex); 3378 3379 return ret; 3380 } 3381 3382 int psp_rlc_autoload_start(struct psp_context *psp) 3383 { 3384 int ret; 3385 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 3386 3387 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC; 3388 3389 ret = psp_cmd_submit_buf(psp, NULL, cmd, 3390 psp->fence_buf_mc_addr); 3391 3392 release_psp_cmd_buf(psp); 3393 3394 return ret; 3395 } 3396 3397 int psp_ring_cmd_submit(struct psp_context *psp, 3398 uint64_t cmd_buf_mc_addr, 3399 uint64_t fence_mc_addr, 3400 int index) 3401 { 3402 unsigned int psp_write_ptr_reg = 0; 3403 struct psp_gfx_rb_frame *write_frame; 3404 struct psp_ring *ring = &psp->km_ring; 3405 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; 3406 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + 3407 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; 3408 struct amdgpu_device *adev = psp->adev; 3409 uint32_t ring_size_dw = ring->ring_size / 4; 3410 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; 3411 3412 /* KM (GPCOM) prepare write pointer */ 3413 psp_write_ptr_reg = psp_ring_get_wptr(psp); 3414 3415 /* Update KM RB frame pointer to new frame */ 3416 /* write_frame ptr increments by size of rb_frame in bytes */ 3417 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ 3418 if ((psp_write_ptr_reg % ring_size_dw) == 0) 3419 write_frame = ring_buffer_start; 3420 else 3421 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); 3422 /* Check invalid write_frame ptr address */ 3423 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { 3424 dev_err(adev->dev, 3425 "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", 3426 ring_buffer_start, ring_buffer_end, write_frame); 3427 dev_err(adev->dev, 3428 "write_frame is pointing to address out of bounds\n"); 3429 return -EINVAL; 3430 } 3431 3432 /* Initialize KM RB frame */ 3433 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); 3434 3435 /* Update KM RB frame */ 3436 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); 3437 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); 3438 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); 3439 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); 3440 write_frame->fence_value = index; 3441 
amdgpu_device_flush_hdp(adev, NULL); 3442 3443 /* Update the write Pointer in DWORDs */ 3444 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; 3445 psp_ring_set_wptr(psp, psp_write_ptr_reg); 3446 return 0; 3447 } 3448 3449 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name) 3450 { 3451 struct amdgpu_device *adev = psp->adev; 3452 const struct psp_firmware_header_v1_0 *asd_hdr; 3453 int err = 0; 3454 3455 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED, 3456 "amdgpu/%s_asd.bin", chip_name); 3457 if (err) 3458 goto out; 3459 3460 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; 3461 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version); 3462 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version); 3463 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes); 3464 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr + 3465 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); 3466 return 0; 3467 out: 3468 amdgpu_ucode_release(&adev->psp.asd_fw); 3469 return err; 3470 } 3471 3472 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name) 3473 { 3474 struct amdgpu_device *adev = psp->adev; 3475 const struct psp_firmware_header_v1_0 *toc_hdr; 3476 int err = 0; 3477 3478 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED, 3479 "amdgpu/%s_toc.bin", chip_name); 3480 if (err) 3481 goto out; 3482 3483 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; 3484 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); 3485 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); 3486 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); 3487 adev->psp.toc.start_addr = (uint8_t *)toc_hdr + 3488 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); 3489 return 0; 3490 out: 3491 amdgpu_ucode_release(&adev->psp.toc_fw); 3492 return err; 3493 } 3494 3495 static int parse_sos_bin_descriptor(struct psp_context *psp, 3496 const struct psp_fw_bin_desc *desc, 3497 const struct psp_firmware_header_v2_0 *sos_hdr) 3498 { 3499 uint8_t *ucode_start_addr = NULL; 3500 3501 if (!psp || !desc || !sos_hdr) 3502 return -EINVAL; 3503 3504 ucode_start_addr = (uint8_t *)sos_hdr + 3505 le32_to_cpu(desc->offset_bytes) + 3506 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3507 3508 switch (desc->fw_type) { 3509 case PSP_FW_TYPE_PSP_SOS: 3510 psp->sos.fw_version = le32_to_cpu(desc->fw_version); 3511 psp->sos.feature_version = le32_to_cpu(desc->fw_version); 3512 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes); 3513 psp->sos.start_addr = ucode_start_addr; 3514 break; 3515 case PSP_FW_TYPE_PSP_SYS_DRV: 3516 psp->sys.fw_version = le32_to_cpu(desc->fw_version); 3517 psp->sys.feature_version = le32_to_cpu(desc->fw_version); 3518 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes); 3519 psp->sys.start_addr = ucode_start_addr; 3520 break; 3521 case PSP_FW_TYPE_PSP_KDB: 3522 psp->kdb.fw_version = le32_to_cpu(desc->fw_version); 3523 psp->kdb.feature_version = le32_to_cpu(desc->fw_version); 3524 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes); 3525 psp->kdb.start_addr = ucode_start_addr; 3526 break; 3527 case PSP_FW_TYPE_PSP_TOC: 3528 psp->toc.fw_version = le32_to_cpu(desc->fw_version); 3529 psp->toc.feature_version = le32_to_cpu(desc->fw_version); 3530 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes); 3531 
psp->toc.start_addr = ucode_start_addr; 3532 break; 3533 case PSP_FW_TYPE_PSP_SPL: 3534 psp->spl.fw_version = le32_to_cpu(desc->fw_version); 3535 psp->spl.feature_version = le32_to_cpu(desc->fw_version); 3536 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes); 3537 psp->spl.start_addr = ucode_start_addr; 3538 break; 3539 case PSP_FW_TYPE_PSP_RL: 3540 psp->rl.fw_version = le32_to_cpu(desc->fw_version); 3541 psp->rl.feature_version = le32_to_cpu(desc->fw_version); 3542 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes); 3543 psp->rl.start_addr = ucode_start_addr; 3544 break; 3545 case PSP_FW_TYPE_PSP_SOC_DRV: 3546 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version); 3547 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version); 3548 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3549 psp->soc_drv.start_addr = ucode_start_addr; 3550 break; 3551 case PSP_FW_TYPE_PSP_INTF_DRV: 3552 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version); 3553 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version); 3554 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3555 psp->intf_drv.start_addr = ucode_start_addr; 3556 break; 3557 case PSP_FW_TYPE_PSP_DBG_DRV: 3558 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version); 3559 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version); 3560 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3561 psp->dbg_drv.start_addr = ucode_start_addr; 3562 break; 3563 case PSP_FW_TYPE_PSP_RAS_DRV: 3564 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version); 3565 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version); 3566 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3567 psp->ras_drv.start_addr = ucode_start_addr; 3568 break; 3569 case PSP_FW_TYPE_PSP_IPKEYMGR_DRV: 3570 psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version); 3571 psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version); 3572 psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3573 psp->ipkeymgr_drv.start_addr = ucode_start_addr; 3574 break; 3575 case PSP_FW_TYPE_PSP_SPDM_DRV: 3576 psp->spdm_drv.fw_version = le32_to_cpu(desc->fw_version); 3577 psp->spdm_drv.feature_version = le32_to_cpu(desc->fw_version); 3578 psp->spdm_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3579 psp->spdm_drv.start_addr = ucode_start_addr; 3580 break; 3581 default: 3582 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type); 3583 break; 3584 } 3585 3586 return 0; 3587 } 3588 3589 static int psp_init_sos_base_fw(struct amdgpu_device *adev) 3590 { 3591 const struct psp_firmware_header_v1_0 *sos_hdr; 3592 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3593 uint8_t *ucode_array_start_addr; 3594 3595 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3596 ucode_array_start_addr = (uint8_t *)sos_hdr + 3597 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3598 3599 if (adev->gmc.xgmi.connected_to_cpu || 3600 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) { 3601 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version); 3602 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version); 3603 3604 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes); 3605 adev->psp.sys.start_addr = ucode_array_start_addr; 3606 3607 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes); 3608 adev->psp.sos.start_addr = ucode_array_start_addr + 3609 le32_to_cpu(sos_hdr->sos.offset_bytes); 3610 } else { 3611 /* Load alternate PSP SOS FW */ 3612 sos_hdr_v1_3 = 
(const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3613 3614 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3615 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3616 3617 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes); 3618 adev->psp.sys.start_addr = ucode_array_start_addr + 3619 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes); 3620 3621 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes); 3622 adev->psp.sos.start_addr = ucode_array_start_addr + 3623 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes); 3624 } 3625 3626 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) { 3627 dev_warn(adev->dev, "PSP SOS FW not available"); 3628 return -EINVAL; 3629 } 3630 3631 return 0; 3632 } 3633 3634 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) 3635 { 3636 struct amdgpu_device *adev = psp->adev; 3637 const struct psp_firmware_header_v1_0 *sos_hdr; 3638 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1; 3639 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; 3640 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3641 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0; 3642 const struct psp_firmware_header_v2_1 *sos_hdr_v2_1; 3643 int fw_index, fw_bin_count, start_index = 0; 3644 const struct psp_fw_bin_desc *fw_bin; 3645 uint8_t *ucode_array_start_addr; 3646 int err = 0; 3647 3648 if (amdgpu_is_kicker_fw(adev)) 3649 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED, 3650 "amdgpu/%s_sos_kicker.bin", chip_name); 3651 else 3652 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED, 3653 "amdgpu/%s_sos.bin", chip_name); 3654 if (err) 3655 goto out; 3656 3657 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3658 ucode_array_start_addr = (uint8_t *)sos_hdr + 3659 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3660 amdgpu_ucode_print_psp_hdr(&sos_hdr->header); 3661 3662 switch (sos_hdr->header.header_version_major) { 3663 case 1: 3664 err = psp_init_sos_base_fw(adev); 3665 if (err) 3666 goto out; 3667 3668 if (sos_hdr->header.header_version_minor == 1) { 3669 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; 3670 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes); 3671 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3672 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes); 3673 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes); 3674 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3675 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes); 3676 } 3677 if (sos_hdr->header.header_version_minor == 2) { 3678 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; 3679 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes); 3680 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3681 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes); 3682 } 3683 if (sos_hdr->header.header_version_minor == 3) { 3684 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3685 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes); 3686 adev->psp.toc.start_addr = ucode_array_start_addr + 3687 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes); 3688 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes); 3689 adev->psp.kdb.start_addr = ucode_array_start_addr + 3690 
le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes); 3691 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes); 3692 adev->psp.spl.start_addr = ucode_array_start_addr + 3693 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes); 3694 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes); 3695 adev->psp.rl.start_addr = ucode_array_start_addr + 3696 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes); 3697 } 3698 break; 3699 case 2: 3700 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data; 3701 3702 fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); 3703 3704 if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) { 3705 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n"); 3706 err = -EINVAL; 3707 goto out; 3708 } 3709 3710 if (sos_hdr_v2_0->header.header_version_minor == 1) { 3711 sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data; 3712 3713 fw_bin = sos_hdr_v2_1->psp_fw_bin; 3714 3715 if (psp_is_aux_sos_load_required(psp)) 3716 start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index); 3717 else 3718 fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index); 3719 3720 } else { 3721 fw_bin = sos_hdr_v2_0->psp_fw_bin; 3722 } 3723 3724 for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) { 3725 err = parse_sos_bin_descriptor(psp, fw_bin + fw_index, 3726 sos_hdr_v2_0); 3727 if (err) 3728 goto out; 3729 } 3730 break; 3731 default: 3732 dev_err(adev->dev, 3733 "unsupported psp sos firmware\n"); 3734 err = -EINVAL; 3735 goto out; 3736 } 3737 3738 return 0; 3739 out: 3740 amdgpu_ucode_release(&adev->psp.sos_fw); 3741 3742 return err; 3743 } 3744 3745 static bool is_ta_fw_applicable(struct psp_context *psp, 3746 const struct psp_fw_bin_desc *desc) 3747 { 3748 struct amdgpu_device *adev = psp->adev; 3749 uint32_t fw_version; 3750 3751 switch (desc->fw_type) { 3752 case TA_FW_TYPE_PSP_XGMI: 3753 case TA_FW_TYPE_PSP_XGMI_AUX: 3754 /* for now, AUX TA only exists on 13.0.6 ta bin, 3755 * from v20.00.0x.14 3756 */ 3757 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == 3758 IP_VERSION(13, 0, 6)) { 3759 fw_version = le32_to_cpu(desc->fw_version); 3760 3761 if (adev->flags & AMD_IS_APU && 3762 (fw_version & 0xff) >= 0x14) 3763 return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX; 3764 else 3765 return desc->fw_type == TA_FW_TYPE_PSP_XGMI; 3766 } 3767 break; 3768 default: 3769 break; 3770 } 3771 3772 return true; 3773 } 3774 3775 static int parse_ta_bin_descriptor(struct psp_context *psp, 3776 const struct psp_fw_bin_desc *desc, 3777 const struct ta_firmware_header_v2_0 *ta_hdr) 3778 { 3779 uint8_t *ucode_start_addr = NULL; 3780 3781 if (!psp || !desc || !ta_hdr) 3782 return -EINVAL; 3783 3784 if (!is_ta_fw_applicable(psp, desc)) 3785 return 0; 3786 3787 ucode_start_addr = (uint8_t *)ta_hdr + 3788 le32_to_cpu(desc->offset_bytes) + 3789 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3790 3791 switch (desc->fw_type) { 3792 case TA_FW_TYPE_PSP_ASD: 3793 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3794 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version); 3795 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3796 psp->asd_context.bin_desc.start_addr = ucode_start_addr; 3797 break; 3798 case TA_FW_TYPE_PSP_XGMI: 3799 case TA_FW_TYPE_PSP_XGMI_AUX: 3800 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3801 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3802 
psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr; 3803 break; 3804 case TA_FW_TYPE_PSP_RAS: 3805 psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3806 psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3807 psp->ras_context.context.bin_desc.start_addr = ucode_start_addr; 3808 break; 3809 case TA_FW_TYPE_PSP_HDCP: 3810 psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3811 psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3812 psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr; 3813 break; 3814 case TA_FW_TYPE_PSP_DTM: 3815 psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3816 psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3817 psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr; 3818 break; 3819 case TA_FW_TYPE_PSP_RAP: 3820 psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3821 psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3822 psp->rap_context.context.bin_desc.start_addr = ucode_start_addr; 3823 break; 3824 case TA_FW_TYPE_PSP_SECUREDISPLAY: 3825 psp->securedisplay_context.context.bin_desc.fw_version = 3826 le32_to_cpu(desc->fw_version); 3827 psp->securedisplay_context.context.bin_desc.size_bytes = 3828 le32_to_cpu(desc->size_bytes); 3829 psp->securedisplay_context.context.bin_desc.start_addr = 3830 ucode_start_addr; 3831 break; 3832 default: 3833 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type); 3834 break; 3835 } 3836 3837 return 0; 3838 } 3839 3840 static int parse_ta_v1_microcode(struct psp_context *psp) 3841 { 3842 const struct ta_firmware_header_v1_0 *ta_hdr; 3843 struct amdgpu_device *adev = psp->adev; 3844 3845 ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data; 3846 3847 if (le16_to_cpu(ta_hdr->header.header_version_major) != 1) 3848 return -EINVAL; 3849 3850 adev->psp.xgmi_context.context.bin_desc.fw_version = 3851 le32_to_cpu(ta_hdr->xgmi.fw_version); 3852 adev->psp.xgmi_context.context.bin_desc.size_bytes = 3853 le32_to_cpu(ta_hdr->xgmi.size_bytes); 3854 adev->psp.xgmi_context.context.bin_desc.start_addr = 3855 (uint8_t *)ta_hdr + 3856 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3857 3858 adev->psp.ras_context.context.bin_desc.fw_version = 3859 le32_to_cpu(ta_hdr->ras.fw_version); 3860 adev->psp.ras_context.context.bin_desc.size_bytes = 3861 le32_to_cpu(ta_hdr->ras.size_bytes); 3862 adev->psp.ras_context.context.bin_desc.start_addr = 3863 (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr + 3864 le32_to_cpu(ta_hdr->ras.offset_bytes); 3865 3866 adev->psp.hdcp_context.context.bin_desc.fw_version = 3867 le32_to_cpu(ta_hdr->hdcp.fw_version); 3868 adev->psp.hdcp_context.context.bin_desc.size_bytes = 3869 le32_to_cpu(ta_hdr->hdcp.size_bytes); 3870 adev->psp.hdcp_context.context.bin_desc.start_addr = 3871 (uint8_t *)ta_hdr + 3872 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3873 3874 adev->psp.dtm_context.context.bin_desc.fw_version = 3875 le32_to_cpu(ta_hdr->dtm.fw_version); 3876 adev->psp.dtm_context.context.bin_desc.size_bytes = 3877 le32_to_cpu(ta_hdr->dtm.size_bytes); 3878 adev->psp.dtm_context.context.bin_desc.start_addr = 3879 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3880 le32_to_cpu(ta_hdr->dtm.offset_bytes); 3881 3882 adev->psp.securedisplay_context.context.bin_desc.fw_version = 3883 
le32_to_cpu(ta_hdr->securedisplay.fw_version); 3884 adev->psp.securedisplay_context.context.bin_desc.size_bytes = 3885 le32_to_cpu(ta_hdr->securedisplay.size_bytes); 3886 adev->psp.securedisplay_context.context.bin_desc.start_addr = 3887 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3888 le32_to_cpu(ta_hdr->securedisplay.offset_bytes); 3889 3890 adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); 3891 3892 return 0; 3893 } 3894 3895 static int parse_ta_v2_microcode(struct psp_context *psp) 3896 { 3897 const struct ta_firmware_header_v2_0 *ta_hdr; 3898 struct amdgpu_device *adev = psp->adev; 3899 int err = 0; 3900 int ta_index = 0; 3901 3902 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data; 3903 3904 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) 3905 return -EINVAL; 3906 3907 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) { 3908 dev_err(adev->dev, "packed TA count exceeds maximum limit\n"); 3909 return -EINVAL; 3910 } 3911 3912 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) { 3913 err = parse_ta_bin_descriptor(psp, 3914 &ta_hdr->ta_fw_bin[ta_index], 3915 ta_hdr); 3916 if (err) 3917 return err; 3918 } 3919 3920 return 0; 3921 } 3922 3923 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name) 3924 { 3925 const struct common_firmware_header *hdr; 3926 struct amdgpu_device *adev = psp->adev; 3927 int err; 3928 3929 if (amdgpu_is_kicker_fw(adev)) 3930 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED, 3931 "amdgpu/%s_ta_kicker.bin", chip_name); 3932 else 3933 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED, 3934 "amdgpu/%s_ta.bin", chip_name); 3935 if (err) 3936 return err; 3937 3938 hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data; 3939 switch (le16_to_cpu(hdr->header_version_major)) { 3940 case 1: 3941 err = parse_ta_v1_microcode(psp); 3942 break; 3943 case 2: 3944 err = parse_ta_v2_microcode(psp); 3945 break; 3946 default: 3947 dev_err(adev->dev, "unsupported TA header version\n"); 3948 err = -EINVAL; 3949 } 3950 3951 if (err) 3952 amdgpu_ucode_release(&adev->psp.ta_fw); 3953 3954 return err; 3955 } 3956 3957 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name) 3958 { 3959 struct amdgpu_device *adev = psp->adev; 3960 const struct psp_firmware_header_v1_0 *cap_hdr_v1_0; 3961 struct amdgpu_firmware_info *info = NULL; 3962 int err = 0; 3963 3964 if (!amdgpu_sriov_vf(adev)) { 3965 dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n"); 3966 return -EINVAL; 3967 } 3968 3969 err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL, 3970 "amdgpu/%s_cap.bin", chip_name); 3971 if (err) { 3972 if (err == -ENODEV) { 3973 dev_warn(adev->dev, "cap microcode does not exist, skip\n"); 3974 err = 0; 3975 } else { 3976 dev_err(adev->dev, "fail to initialize cap microcode\n"); 3977 } 3978 goto out; 3979 } 3980 3981 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP]; 3982 info->ucode_id = AMDGPU_UCODE_ID_CAP; 3983 info->fw = adev->psp.cap_fw; 3984 cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *) 3985 adev->psp.cap_fw->data; 3986 adev->firmware.fw_size += ALIGN( 3987 le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE); 3988 adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version); 3989 adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version); 3990 adev->psp.cap_ucode_size = 
le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
3991
3992 return 0;
3993
3994 out:
3995 amdgpu_ucode_release(&adev->psp.cap_fw);
3996 return err;
3997 }
3998
3999 int psp_config_sq_perfmon(struct psp_context *psp,
4000 uint32_t xcp_id, bool core_override_enable,
4001 bool reg_override_enable, bool perfmon_override_enable)
4002 {
4003 int ret;
4004
4005 if (amdgpu_sriov_vf(psp->adev))
4006 return 0;
4007
4008 if (xcp_id > MAX_XCP) {
4009 dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id);
4010 return -EINVAL;
4011 }
4012
4013 if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) {
4014 dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n",
4015 amdgpu_ip_version(psp->adev, MP0_HWIP, 0));
4016 return -EINVAL;
4017 }
4018 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
4019
4020 cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON;
4021 cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id);
4022 cmd->cmd.config_sq_perfmon.core_override = core_override_enable;
4023 cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable;
4024 cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable;
4025
4026 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
4027 if (ret)
4028 dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n",
4029 xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable);
4030
4031 release_psp_cmd_buf(psp);
4032 return ret;
4033 }
4034
4035 static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
4036 enum amd_clockgating_state state)
4037 {
4038 return 0;
4039 }
4040
4041 static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block,
4042 enum amd_powergating_state state)
4043 {
4044 return 0;
4045 }
4046
4047 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
4048 struct device_attribute *attr,
4049 char *buf)
4050 {
4051 struct drm_device *ddev = dev_get_drvdata(dev);
4052 struct amdgpu_device *adev = drm_to_adev(ddev);
4053 struct amdgpu_ip_block *ip_block;
4054 uint32_t fw_ver;
4055 int ret;
4056
4057 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
4058 if (!ip_block || !ip_block->status.late_initialized) {
4059 dev_info(adev->dev, "PSP block is not ready yet.\n");
4060 return -EBUSY;
4061 }
4062
4063 mutex_lock(&adev->psp.mutex);
4064 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
4065 mutex_unlock(&adev->psp.mutex);
4066
4067 if (ret) {
4068 dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
4069 return ret;
4070 }
4071
4072 return sysfs_emit(buf, "%x\n", fw_ver);
4073 }
4074
4075 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
4076 struct device_attribute *attr,
4077 const char *buf,
4078 size_t count)
4079 {
4080 struct drm_device *ddev = dev_get_drvdata(dev);
4081 struct amdgpu_device *adev = drm_to_adev(ddev);
4082 int ret, idx;
4083 const struct firmware *usbc_pd_fw;
4084 struct amdgpu_bo *fw_buf_bo = NULL;
4085 uint64_t fw_pri_mc_addr;
4086 void *fw_pri_cpu_addr;
4087 struct amdgpu_ip_block *ip_block;
4088
4089 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
4090 if (!ip_block || !ip_block->status.late_initialized) {
4091 dev_err(adev->dev, "PSP block is not ready yet.\n");
4092 return -EBUSY;
4093 }
4094
4095 if (!drm_dev_enter(ddev, &idx))
4096 return -ENODEV;
4097
4098 ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
4099 "amdgpu/%s", buf);
4100 if (ret)
4101 goto fail;
4102
4103 /* LFB address which is
aligned to 1MB boundary per PSP request */ 4104 ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000, 4105 AMDGPU_GEM_DOMAIN_VRAM | 4106 AMDGPU_GEM_DOMAIN_GTT, 4107 &fw_buf_bo, &fw_pri_mc_addr, 4108 &fw_pri_cpu_addr); 4109 if (ret) 4110 goto rel_buf; 4111 4112 memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size); 4113 4114 mutex_lock(&adev->psp.mutex); 4115 ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr); 4116 mutex_unlock(&adev->psp.mutex); 4117 4118 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr); 4119 4120 rel_buf: 4121 amdgpu_ucode_release(&usbc_pd_fw); 4122 fail: 4123 if (ret) { 4124 dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret); 4125 count = ret; 4126 } 4127 4128 drm_dev_exit(idx); 4129 return count; 4130 } 4131 4132 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size) 4133 { 4134 int idx; 4135 4136 if (!drm_dev_enter(adev_to_drm(psp->adev), &idx)) 4137 return; 4138 4139 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 4140 memcpy(psp->fw_pri_buf, start_addr, bin_size); 4141 4142 drm_dev_exit(idx); 4143 } 4144 4145 /** 4146 * DOC: usbc_pd_fw 4147 * Reading from this file will retrieve the USB-C PD firmware version. Writing to 4148 * this file will trigger the update process. 4149 */ 4150 static DEVICE_ATTR(usbc_pd_fw, 0644, 4151 psp_usbc_pd_fw_sysfs_read, 4152 psp_usbc_pd_fw_sysfs_write); 4153 4154 int is_psp_fw_valid(struct psp_bin_desc bin) 4155 { 4156 return bin.size_bytes; 4157 } 4158 4159 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj, 4160 const struct bin_attribute *bin_attr, 4161 char *buffer, loff_t pos, size_t count) 4162 { 4163 struct device *dev = kobj_to_dev(kobj); 4164 struct drm_device *ddev = dev_get_drvdata(dev); 4165 struct amdgpu_device *adev = drm_to_adev(ddev); 4166 4167 adev->psp.vbflash_done = false; 4168 4169 /* Safeguard against memory drain */ 4170 if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) { 4171 dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B); 4172 kvfree(adev->psp.vbflash_tmp_buf); 4173 adev->psp.vbflash_tmp_buf = NULL; 4174 adev->psp.vbflash_image_size = 0; 4175 return -ENOMEM; 4176 } 4177 4178 /* TODO Just allocate max for now and optimize to realloc later if needed */ 4179 if (!adev->psp.vbflash_tmp_buf) { 4180 adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL); 4181 if (!adev->psp.vbflash_tmp_buf) 4182 return -ENOMEM; 4183 } 4184 4185 mutex_lock(&adev->psp.mutex); 4186 memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count); 4187 adev->psp.vbflash_image_size += count; 4188 mutex_unlock(&adev->psp.mutex); 4189 4190 dev_dbg(adev->dev, "IFWI staged for update\n"); 4191 4192 return count; 4193 } 4194 4195 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj, 4196 const struct bin_attribute *bin_attr, char *buffer, 4197 loff_t pos, size_t count) 4198 { 4199 struct device *dev = kobj_to_dev(kobj); 4200 struct drm_device *ddev = dev_get_drvdata(dev); 4201 struct amdgpu_device *adev = drm_to_adev(ddev); 4202 struct amdgpu_bo *fw_buf_bo = NULL; 4203 uint64_t fw_pri_mc_addr; 4204 void *fw_pri_cpu_addr; 4205 int ret; 4206 4207 if (adev->psp.vbflash_image_size == 0) 4208 return -EINVAL; 4209 4210 dev_dbg(adev->dev, "PSP IFWI flash process initiated\n"); 4211 4212 ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size, 4213 AMDGPU_GPU_PAGE_SIZE, 4214 AMDGPU_GEM_DOMAIN_VRAM, 4215 &fw_buf_bo, 4216 &fw_pri_mc_addr, 4217 &fw_pri_cpu_addr); 4218 
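/* The image staged by amdgpu_psp_vbflash_write() is copied into this
 * GPU-visible bounce buffer and handed to the PSP via psp_update_spirom();
 * the temporary staging buffer is released on both the success and error
 * paths below.
 */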
if (ret) 4219 goto rel_buf; 4220 4221 memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size); 4222 4223 mutex_lock(&adev->psp.mutex); 4224 ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr); 4225 mutex_unlock(&adev->psp.mutex); 4226 4227 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr); 4228 4229 rel_buf: 4230 kvfree(adev->psp.vbflash_tmp_buf); 4231 adev->psp.vbflash_tmp_buf = NULL; 4232 adev->psp.vbflash_image_size = 0; 4233 4234 if (ret) { 4235 dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret); 4236 return ret; 4237 } 4238 4239 dev_dbg(adev->dev, "PSP IFWI flash process done\n"); 4240 return 0; 4241 } 4242 4243 /** 4244 * DOC: psp_vbflash 4245 * Writing to this file will stage an IFWI for update. Reading from this file 4246 * will trigger the update process. 4247 */ 4248 static const struct bin_attribute psp_vbflash_bin_attr = { 4249 .attr = {.name = "psp_vbflash", .mode = 0660}, 4250 .size = 0, 4251 .write_new = amdgpu_psp_vbflash_write, 4252 .read_new = amdgpu_psp_vbflash_read, 4253 }; 4254 4255 /** 4256 * DOC: psp_vbflash_status 4257 * The status of the flash process. 4258 * 0: IFWI flash not complete. 4259 * 1: IFWI flash complete. 4260 */ 4261 static ssize_t amdgpu_psp_vbflash_status(struct device *dev, 4262 struct device_attribute *attr, 4263 char *buf) 4264 { 4265 struct drm_device *ddev = dev_get_drvdata(dev); 4266 struct amdgpu_device *adev = drm_to_adev(ddev); 4267 uint32_t vbflash_status; 4268 4269 vbflash_status = psp_vbflash_status(&adev->psp); 4270 if (!adev->psp.vbflash_done) 4271 vbflash_status = 0; 4272 else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000)) 4273 vbflash_status = 1; 4274 4275 return sysfs_emit(buf, "0x%x\n", vbflash_status); 4276 } 4277 static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL); 4278 4279 static const struct bin_attribute *const bin_flash_attrs[] = { 4280 &psp_vbflash_bin_attr, 4281 NULL 4282 }; 4283 4284 static struct attribute *flash_attrs[] = { 4285 &dev_attr_psp_vbflash_status.attr, 4286 &dev_attr_usbc_pd_fw.attr, 4287 NULL 4288 }; 4289 4290 static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx) 4291 { 4292 struct device *dev = kobj_to_dev(kobj); 4293 struct drm_device *ddev = dev_get_drvdata(dev); 4294 struct amdgpu_device *adev = drm_to_adev(ddev); 4295 4296 if (attr == &dev_attr_usbc_pd_fw.attr) 4297 return adev->psp.sup_pd_fw_up ? 0660 : 0; 4298 4299 return adev->psp.sup_ifwi_up ? 0440 : 0; 4300 } 4301 4302 static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj, 4303 const struct bin_attribute *attr, 4304 int idx) 4305 { 4306 struct device *dev = kobj_to_dev(kobj); 4307 struct drm_device *ddev = dev_get_drvdata(dev); 4308 struct amdgpu_device *adev = drm_to_adev(ddev); 4309 4310 return adev->psp.sup_ifwi_up ? 
0660 : 0;
4311 }
4312
4313 const struct attribute_group amdgpu_flash_attr_group = {
4314 .attrs = flash_attrs,
4315 .bin_attrs_new = bin_flash_attrs,
4316 .is_bin_visible = amdgpu_bin_flash_attr_is_visible,
4317 .is_visible = amdgpu_flash_attr_is_visible,
4318 };
4319
4320 #if defined(CONFIG_DEBUG_FS)
4321 static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
4322 {
4323 struct amdgpu_device *adev = filp->f_inode->i_private;
4324 struct spirom_bo *bo_triplet;
4325 int ret;
4326
4327 /* serialize open() calls on this file */
4328 if (!mutex_trylock(&adev->psp.mutex))
4329 return -EBUSY;
4330
4331 /*
4332 * Make sure only one userspace process is alive for dumping, so that
4333 * only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE_B * 2 is consumed,
4334 * e.g. when one process tries to open the file while another one has
4335 * proceeded to read or release it. This also removes the need to take
4336 * the mutex in the read() and release() callbacks.
4337 */
4338 if (adev->psp.spirom_dump_trip) {
4339 mutex_unlock(&adev->psp.mutex);
4340 return -EBUSY;
4341 }
4342
4343 bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
4344 if (!bo_triplet) {
4345 mutex_unlock(&adev->psp.mutex);
4346 return -ENOMEM;
4347 }
4348
4349 ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
4350 AMDGPU_GPU_PAGE_SIZE,
4351 AMDGPU_GEM_DOMAIN_GTT,
4352 &bo_triplet->bo,
4353 &bo_triplet->mc_addr,
4354 &bo_triplet->cpu_addr);
4355 if (ret)
4356 goto rel_trip;
4357
4358 ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
4359 if (ret)
4360 goto rel_bo;
4361
4362 adev->psp.spirom_dump_trip = bo_triplet;
4363 mutex_unlock(&adev->psp.mutex);
4364 return 0;
4365 rel_bo:
4366 amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
4367 &bo_triplet->cpu_addr);
4368 rel_trip:
4369 kfree(bo_triplet);
4370 mutex_unlock(&adev->psp.mutex);
4371 dev_err(adev->dev, "Failed to dump IFWI, err = %d\n", ret);
4372 return ret;
4373 }
4374
4375 static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
4376 loff_t *pos)
4377 {
4378 struct amdgpu_device *adev = filp->f_inode->i_private;
4379 struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
4380
4381 if (!bo_triplet)
4382 return -EINVAL;
4383
4384 return simple_read_from_buffer(buf,
4385 size,
4386 pos, bo_triplet->cpu_addr,
4387 AMD_VBIOS_FILE_MAX_SIZE_B * 2);
4388 }
4389
4390 static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
4391 {
4392 struct amdgpu_device *adev = filp->f_inode->i_private;
4393 struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
4394
4395 if (bo_triplet) {
4396 amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
4397 &bo_triplet->cpu_addr);
4398 kfree(bo_triplet);
4399 }
4400
4401 adev->psp.spirom_dump_trip = NULL;
4402 return 0;
4403 }
4404
4405 static const struct file_operations psp_dump_spirom_debugfs_ops = {
4406 .owner = THIS_MODULE,
4407 .open = psp_read_spirom_debugfs_open,
4408 .read = psp_read_spirom_debugfs_read,
4409 .release = psp_read_spirom_debugfs_release,
4410 .llseek = default_llseek,
4411 };
4412 #endif
4413
4414 void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
4415 {
4416 #if defined(CONFIG_DEBUG_FS)
4417 struct drm_minor *minor = adev_to_drm(adev)->primary;
4418
4419 debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
4420 adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
4421 #endif
4422 }
4423
4424 const struct amd_ip_funcs psp_ip_funcs = {
4425 .name
= "psp", 4426 .early_init = psp_early_init, 4427 .sw_init = psp_sw_init, 4428 .sw_fini = psp_sw_fini, 4429 .hw_init = psp_hw_init, 4430 .hw_fini = psp_hw_fini, 4431 .suspend = psp_suspend, 4432 .resume = psp_resume, 4433 .set_clockgating_state = psp_set_clockgating_state, 4434 .set_powergating_state = psp_set_powergating_state, 4435 }; 4436 4437 const struct amdgpu_ip_block_version psp_v3_1_ip_block = { 4438 .type = AMD_IP_BLOCK_TYPE_PSP, 4439 .major = 3, 4440 .minor = 1, 4441 .rev = 0, 4442 .funcs = &psp_ip_funcs, 4443 }; 4444 4445 const struct amdgpu_ip_block_version psp_v10_0_ip_block = { 4446 .type = AMD_IP_BLOCK_TYPE_PSP, 4447 .major = 10, 4448 .minor = 0, 4449 .rev = 0, 4450 .funcs = &psp_ip_funcs, 4451 }; 4452 4453 const struct amdgpu_ip_block_version psp_v11_0_ip_block = { 4454 .type = AMD_IP_BLOCK_TYPE_PSP, 4455 .major = 11, 4456 .minor = 0, 4457 .rev = 0, 4458 .funcs = &psp_ip_funcs, 4459 }; 4460 4461 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = { 4462 .type = AMD_IP_BLOCK_TYPE_PSP, 4463 .major = 11, 4464 .minor = 0, 4465 .rev = 8, 4466 .funcs = &psp_ip_funcs, 4467 }; 4468 4469 const struct amdgpu_ip_block_version psp_v12_0_ip_block = { 4470 .type = AMD_IP_BLOCK_TYPE_PSP, 4471 .major = 12, 4472 .minor = 0, 4473 .rev = 0, 4474 .funcs = &psp_ip_funcs, 4475 }; 4476 4477 const struct amdgpu_ip_block_version psp_v13_0_ip_block = { 4478 .type = AMD_IP_BLOCK_TYPE_PSP, 4479 .major = 13, 4480 .minor = 0, 4481 .rev = 0, 4482 .funcs = &psp_ip_funcs, 4483 }; 4484 4485 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = { 4486 .type = AMD_IP_BLOCK_TYPE_PSP, 4487 .major = 13, 4488 .minor = 0, 4489 .rev = 4, 4490 .funcs = &psp_ip_funcs, 4491 }; 4492 4493 const struct amdgpu_ip_block_version psp_v14_0_ip_block = { 4494 .type = AMD_IP_BLOCK_TYPE_PSP, 4495 .major = 14, 4496 .minor = 0, 4497 .rev = 0, 4498 .funcs = &psp_ip_funcs, 4499 }; 4500
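/*
 * Illustrative usage of the flashing interfaces documented above. This is a
 * sketch only: the exact sysfs location (shown here as the PCI device
 * directory, <bdf>) and the firmware file names are assumptions, and the
 * attributes are only exposed when the PSP reports IFWI/PD update support.
 *
 *   # stage an IFWI image, trigger the flash, then check the result
 *   cat ifwi.bin > /sys/bus/pci/devices/<bdf>/psp_vbflash
 *   cat /sys/bus/pci/devices/<bdf>/psp_vbflash > /dev/null
 *   cat /sys/bus/pci/devices/<bdf>/psp_vbflash_status
 *
 *   # read the USB-C PD firmware version, or update it with a file that
 *   # has been placed under the firmware search path (amdgpu/ directory)
 *   cat /sys/bus/pci/devices/<bdf>/usbc_pd_fw
 *   echo <pd_fw_name>.bin > /sys/bus/pci/devices/<bdf>/usbc_pd_fw
 */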