1 /* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Author: Huang Rui 23 * 24 */ 25 26 #include <linux/firmware.h> 27 #include <drm/drm_drv.h> 28 29 #include "amdgpu.h" 30 #include "amdgpu_psp.h" 31 #include "amdgpu_ucode.h" 32 #include "amdgpu_xgmi.h" 33 #include "soc15_common.h" 34 #include "psp_v3_1.h" 35 #include "psp_v10_0.h" 36 #include "psp_v11_0.h" 37 #include "psp_v11_0_8.h" 38 #include "psp_v12_0.h" 39 #include "psp_v13_0.h" 40 #include "psp_v13_0_4.h" 41 #include "psp_v14_0.h" 42 43 #include "amdgpu_ras.h" 44 #include "amdgpu_securedisplay.h" 45 #include "amdgpu_atomfirmware.h" 46 47 #define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*16) 48 49 static int psp_load_smu_fw(struct psp_context *psp); 50 static int psp_rap_terminate(struct psp_context *psp); 51 static int psp_securedisplay_terminate(struct psp_context *psp); 52 53 static int psp_ring_init(struct psp_context *psp, 54 enum psp_ring_type ring_type) 55 { 56 int ret = 0; 57 struct psp_ring *ring; 58 struct amdgpu_device *adev = psp->adev; 59 60 ring = &psp->km_ring; 61 62 ring->ring_type = ring_type; 63 64 /* allocate 4k Page of Local Frame Buffer memory for ring */ 65 ring->ring_size = 0x1000; 66 ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE, 67 AMDGPU_GEM_DOMAIN_VRAM | 68 AMDGPU_GEM_DOMAIN_GTT, 69 &adev->firmware.rbuf, 70 &ring->ring_mem_mc_addr, 71 (void **)&ring->ring_mem); 72 if (ret) { 73 ring->ring_size = 0; 74 return ret; 75 } 76 77 return 0; 78 } 79 80 /* 81 * Due to DF Cstate management centralized to PMFW, the firmware 82 * loading sequence will be updated as below: 83 * - Load KDB 84 * - Load SYS_DRV 85 * - Load tOS 86 * - Load PMFW 87 * - Setup TMR 88 * - Load other non-psp fw 89 * - Load ASD 90 * - Load XGMI/RAS/HDCP/DTM TA if any 91 * 92 * This new sequence is required for 93 * - Arcturus and onwards 94 */ 95 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp) 96 { 97 struct amdgpu_device *adev = psp->adev; 98 99 if (amdgpu_sriov_vf(adev)) { 100 psp->pmfw_centralized_cstate_management = false; 101 return; 102 } 103 104 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 105 case IP_VERSION(11, 0, 0): 106 case IP_VERSION(11, 0, 4): 107 case IP_VERSION(11, 0, 5): 108 case IP_VERSION(11, 0, 7): 109 case IP_VERSION(11, 0, 9): 110 case IP_VERSION(11, 0, 11): 111 case IP_VERSION(11, 0, 12): 112 case IP_VERSION(11, 0, 13): 113 case IP_VERSION(13, 0, 0): 114 case IP_VERSION(13, 0, 
2): 115 case IP_VERSION(13, 0, 7): 116 psp->pmfw_centralized_cstate_management = true; 117 break; 118 default: 119 psp->pmfw_centralized_cstate_management = false; 120 break; 121 } 122 } 123 124 static int psp_init_sriov_microcode(struct psp_context *psp) 125 { 126 struct amdgpu_device *adev = psp->adev; 127 char ucode_prefix[30]; 128 int ret = 0; 129 130 amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix)); 131 132 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 133 case IP_VERSION(9, 0, 0): 134 case IP_VERSION(11, 0, 7): 135 case IP_VERSION(11, 0, 9): 136 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2; 137 ret = psp_init_cap_microcode(psp, ucode_prefix); 138 break; 139 case IP_VERSION(13, 0, 2): 140 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2; 141 ret = psp_init_cap_microcode(psp, ucode_prefix); 142 ret &= psp_init_ta_microcode(psp, ucode_prefix); 143 break; 144 case IP_VERSION(13, 0, 0): 145 adev->virt.autoload_ucode_id = 0; 146 break; 147 case IP_VERSION(13, 0, 6): 148 case IP_VERSION(13, 0, 14): 149 ret = psp_init_cap_microcode(psp, ucode_prefix); 150 ret &= psp_init_ta_microcode(psp, ucode_prefix); 151 break; 152 case IP_VERSION(13, 0, 10): 153 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA; 154 ret = psp_init_cap_microcode(psp, ucode_prefix); 155 break; 156 case IP_VERSION(13, 0, 12): 157 ret = psp_init_ta_microcode(psp, ucode_prefix); 158 break; 159 default: 160 return -EINVAL; 161 } 162 return ret; 163 } 164 165 static int psp_early_init(struct amdgpu_ip_block *ip_block) 166 { 167 struct amdgpu_device *adev = ip_block->adev; 168 struct psp_context *psp = &adev->psp; 169 170 psp->autoload_supported = true; 171 psp->boot_time_tmr = true; 172 173 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 174 case IP_VERSION(9, 0, 0): 175 psp_v3_1_set_psp_funcs(psp); 176 psp->autoload_supported = false; 177 psp->boot_time_tmr = false; 178 break; 179 case IP_VERSION(10, 0, 0): 180 case IP_VERSION(10, 0, 1): 181 psp_v10_0_set_psp_funcs(psp); 182 psp->autoload_supported = false; 183 psp->boot_time_tmr = false; 184 break; 185 case IP_VERSION(11, 0, 2): 186 case IP_VERSION(11, 0, 4): 187 psp_v11_0_set_psp_funcs(psp); 188 psp->autoload_supported = false; 189 psp->boot_time_tmr = false; 190 break; 191 case IP_VERSION(11, 0, 0): 192 case IP_VERSION(11, 0, 7): 193 adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev); 194 fallthrough; 195 case IP_VERSION(11, 0, 5): 196 case IP_VERSION(11, 0, 9): 197 case IP_VERSION(11, 0, 11): 198 case IP_VERSION(11, 5, 0): 199 case IP_VERSION(11, 5, 2): 200 case IP_VERSION(11, 0, 12): 201 case IP_VERSION(11, 0, 13): 202 psp_v11_0_set_psp_funcs(psp); 203 psp->boot_time_tmr = false; 204 break; 205 case IP_VERSION(11, 0, 3): 206 case IP_VERSION(12, 0, 1): 207 psp_v12_0_set_psp_funcs(psp); 208 psp->autoload_supported = false; 209 psp->boot_time_tmr = false; 210 break; 211 case IP_VERSION(13, 0, 2): 212 psp->boot_time_tmr = false; 213 fallthrough; 214 case IP_VERSION(13, 0, 6): 215 case IP_VERSION(13, 0, 14): 216 psp_v13_0_set_psp_funcs(psp); 217 psp->autoload_supported = false; 218 break; 219 case IP_VERSION(13, 0, 12): 220 psp_v13_0_set_psp_funcs(psp); 221 psp->autoload_supported = false; 222 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev); 223 break; 224 case IP_VERSION(13, 0, 1): 225 case IP_VERSION(13, 0, 3): 226 case IP_VERSION(13, 0, 5): 227 case IP_VERSION(13, 0, 8): 228 case IP_VERSION(13, 0, 11): 229 case IP_VERSION(14, 0, 0): 230 case IP_VERSION(14, 0, 1): 231 case IP_VERSION(14, 0, 4): 232 
psp_v13_0_set_psp_funcs(psp); 233 psp->boot_time_tmr = false; 234 break; 235 case IP_VERSION(11, 0, 8): 236 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) { 237 psp_v11_0_8_set_psp_funcs(psp); 238 } 239 psp->autoload_supported = false; 240 psp->boot_time_tmr = false; 241 break; 242 case IP_VERSION(13, 0, 0): 243 case IP_VERSION(13, 0, 7): 244 case IP_VERSION(13, 0, 10): 245 psp_v13_0_set_psp_funcs(psp); 246 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev); 247 psp->boot_time_tmr = false; 248 break; 249 case IP_VERSION(13, 0, 4): 250 psp_v13_0_4_set_psp_funcs(psp); 251 psp->boot_time_tmr = false; 252 break; 253 case IP_VERSION(14, 0, 2): 254 case IP_VERSION(14, 0, 3): 255 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev); 256 psp_v14_0_set_psp_funcs(psp); 257 break; 258 case IP_VERSION(14, 0, 5): 259 psp_v14_0_set_psp_funcs(psp); 260 psp->boot_time_tmr = false; 261 break; 262 default: 263 return -EINVAL; 264 } 265 266 psp->adev = adev; 267 268 adev->psp_timeout = 20000; 269 270 psp_check_pmfw_centralized_cstate_management(psp); 271 272 if (amdgpu_sriov_vf(adev)) 273 return psp_init_sriov_microcode(psp); 274 else 275 return psp_init_microcode(psp); 276 } 277 278 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx) 279 { 280 amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr, 281 &mem_ctx->shared_buf); 282 mem_ctx->shared_bo = NULL; 283 } 284 285 static void psp_free_shared_bufs(struct psp_context *psp) 286 { 287 void *tmr_buf; 288 void **pptr; 289 290 /* free TMR memory buffer */ 291 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; 292 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr); 293 psp->tmr_bo = NULL; 294 295 /* free xgmi shared memory */ 296 psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context); 297 298 /* free ras shared memory */ 299 psp_ta_free_shared_buf(&psp->ras_context.context.mem_context); 300 301 /* free hdcp shared memory */ 302 psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context); 303 304 /* free dtm shared memory */ 305 psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context); 306 307 /* free rap shared memory */ 308 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); 309 310 /* free securedisplay shared memory */ 311 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); 312 313 314 } 315 316 static void psp_memory_training_fini(struct psp_context *psp) 317 { 318 struct psp_memory_training_context *ctx = &psp->mem_train_ctx; 319 320 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT; 321 kfree(ctx->sys_cache); 322 ctx->sys_cache = NULL; 323 } 324 325 static int psp_memory_training_init(struct psp_context *psp) 326 { 327 int ret; 328 struct psp_memory_training_context *ctx = &psp->mem_train_ctx; 329 330 if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) { 331 dev_dbg(psp->adev->dev, "memory training is not supported!\n"); 332 return 0; 333 } 334 335 ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL); 336 if (ctx->sys_cache == NULL) { 337 dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n"); 338 ret = -ENOMEM; 339 goto Err_out; 340 } 341 342 dev_dbg(psp->adev->dev, 343 "train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", 344 ctx->train_data_size, 345 ctx->p2c_train_data_offset, 346 ctx->c2p_train_data_offset); 347 ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS; 348 return 0; 349 350 Err_out: 351 psp_memory_training_fini(psp); 352 return ret; 353 } 354 355 /* 356 * Helper funciton to query psp runtime database entry 357 * 358 * @adev: amdgpu_device pointer 
359 * @entry_type: the type of psp runtime database entry 360 * @db_entry: runtime database entry pointer 361 * 362 * Return false if runtime database doesn't exit or entry is invalid 363 * or true if the specific database entry is found, and copy to @db_entry 364 */ 365 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev, 366 enum psp_runtime_entry_type entry_type, 367 void *db_entry) 368 { 369 uint64_t db_header_pos, db_dir_pos; 370 struct psp_runtime_data_header db_header = {0}; 371 struct psp_runtime_data_directory db_dir = {0}; 372 bool ret = false; 373 int i; 374 375 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || 376 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) || 377 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) 378 return false; 379 380 db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET; 381 db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header); 382 383 /* read runtime db header from vram */ 384 amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header, 385 sizeof(struct psp_runtime_data_header), false); 386 387 if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) { 388 /* runtime db doesn't exist, exit */ 389 dev_dbg(adev->dev, "PSP runtime database doesn't exist\n"); 390 return false; 391 } 392 393 /* read runtime database entry from vram */ 394 amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir, 395 sizeof(struct psp_runtime_data_directory), false); 396 397 if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) { 398 /* invalid db entry count, exit */ 399 dev_warn(adev->dev, "Invalid PSP runtime database entry count\n"); 400 return false; 401 } 402 403 /* look up for requested entry type */ 404 for (i = 0; i < db_dir.entry_count && !ret; i++) { 405 if (db_dir.entry_list[i].entry_type == entry_type) { 406 switch (entry_type) { 407 case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG: 408 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) { 409 /* invalid db entry size */ 410 dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n"); 411 return false; 412 } 413 /* read runtime database entry */ 414 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset, 415 (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false); 416 ret = true; 417 break; 418 case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS: 419 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) { 420 /* invalid db entry size */ 421 dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n"); 422 return false; 423 } 424 /* read runtime database entry */ 425 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset, 426 (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false); 427 ret = true; 428 break; 429 default: 430 ret = false; 431 break; 432 } 433 } 434 } 435 436 return ret; 437 } 438 439 static int psp_sw_init(struct amdgpu_ip_block *ip_block) 440 { 441 struct amdgpu_device *adev = ip_block->adev; 442 struct psp_context *psp = &adev->psp; 443 int ret; 444 struct psp_runtime_boot_cfg_entry boot_cfg_entry; 445 struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx; 446 struct psp_runtime_scpm_entry scpm_entry; 447 448 psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 449 if (!psp->cmd) { 450 dev_err(adev->dev, "Failed to allocate memory to command buffer!\n"); 451 ret = -ENOMEM; 452 } 453 454 adev->psp.xgmi_context.supports_extended_data = 455 
!adev->gmc.xgmi.connected_to_cpu && 456 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2); 457 458 memset(&scpm_entry, 0, sizeof(scpm_entry)); 459 if ((psp_get_runtime_db_entry(adev, 460 PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS, 461 &scpm_entry)) && 462 (scpm_entry.scpm_status != SCPM_DISABLE)) { 463 adev->scpm_enabled = true; 464 adev->scpm_status = scpm_entry.scpm_status; 465 } else { 466 adev->scpm_enabled = false; 467 adev->scpm_status = SCPM_DISABLE; 468 } 469 470 /* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */ 471 472 memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry)); 473 if (psp_get_runtime_db_entry(adev, 474 PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG, 475 &boot_cfg_entry)) { 476 psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask; 477 if ((psp->boot_cfg_bitmask) & 478 BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) { 479 /* If psp runtime database exists, then 480 * only enable two stage memory training 481 * when TWO_STAGE_DRAM_TRAINING bit is set 482 * in runtime database 483 */ 484 mem_training_ctx->enable_mem_training = true; 485 } 486 487 } else { 488 /* If psp runtime database doesn't exist or is 489 * invalid, force enable two stage memory training 490 */ 491 mem_training_ctx->enable_mem_training = true; 492 } 493 494 if (mem_training_ctx->enable_mem_training) { 495 ret = psp_memory_training_init(psp); 496 if (ret) { 497 dev_err(adev->dev, "Failed to initialize memory training!\n"); 498 return ret; 499 } 500 501 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); 502 if (ret) { 503 dev_err(adev->dev, "Failed to process memory training!\n"); 504 return ret; 505 } 506 } 507 508 ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, 509 (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ? 
510 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, 511 &psp->fw_pri_bo, 512 &psp->fw_pri_mc_addr, 513 &psp->fw_pri_buf); 514 if (ret) 515 return ret; 516 517 ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE, 518 AMDGPU_GEM_DOMAIN_VRAM | 519 AMDGPU_GEM_DOMAIN_GTT, 520 &psp->fence_buf_bo, 521 &psp->fence_buf_mc_addr, 522 &psp->fence_buf); 523 if (ret) 524 goto failed1; 525 526 ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE, 527 AMDGPU_GEM_DOMAIN_VRAM | 528 AMDGPU_GEM_DOMAIN_GTT, 529 &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, 530 (void **)&psp->cmd_buf_mem); 531 if (ret) 532 goto failed2; 533 534 return 0; 535 536 failed2: 537 amdgpu_bo_free_kernel(&psp->fence_buf_bo, 538 &psp->fence_buf_mc_addr, &psp->fence_buf); 539 failed1: 540 amdgpu_bo_free_kernel(&psp->fw_pri_bo, 541 &psp->fw_pri_mc_addr, &psp->fw_pri_buf); 542 return ret; 543 } 544 545 static int psp_sw_fini(struct amdgpu_ip_block *ip_block) 546 { 547 struct amdgpu_device *adev = ip_block->adev; 548 struct psp_context *psp = &adev->psp; 549 550 psp_memory_training_fini(psp); 551 552 amdgpu_ucode_release(&psp->sos_fw); 553 amdgpu_ucode_release(&psp->asd_fw); 554 amdgpu_ucode_release(&psp->ta_fw); 555 amdgpu_ucode_release(&psp->cap_fw); 556 amdgpu_ucode_release(&psp->toc_fw); 557 558 kfree(psp->cmd); 559 psp->cmd = NULL; 560 561 psp_free_shared_bufs(psp); 562 563 if (psp->km_ring.ring_mem) 564 amdgpu_bo_free_kernel(&adev->firmware.rbuf, 565 &psp->km_ring.ring_mem_mc_addr, 566 (void **)&psp->km_ring.ring_mem); 567 568 amdgpu_bo_free_kernel(&psp->fw_pri_bo, 569 &psp->fw_pri_mc_addr, &psp->fw_pri_buf); 570 amdgpu_bo_free_kernel(&psp->fence_buf_bo, 571 &psp->fence_buf_mc_addr, &psp->fence_buf); 572 amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, 573 (void **)&psp->cmd_buf_mem); 574 575 return 0; 576 } 577 578 int psp_wait_for(struct psp_context *psp, uint32_t reg_index, uint32_t reg_val, 579 uint32_t mask, uint32_t flags) 580 { 581 bool check_changed = flags & PSP_WAITREG_CHANGED; 582 bool verbose = !(flags & PSP_WAITREG_NOVERBOSE); 583 uint32_t val; 584 int i; 585 struct amdgpu_device *adev = psp->adev; 586 587 if (psp->adev->no_hw_access) 588 return 0; 589 590 for (i = 0; i < adev->usec_timeout; i++) { 591 val = RREG32(reg_index); 592 if (check_changed) { 593 if (val != reg_val) 594 return 0; 595 } else { 596 if ((val & mask) == reg_val) 597 return 0; 598 } 599 udelay(1); 600 } 601 602 if (verbose) 603 dev_err(adev->dev, 604 "psp reg (0x%x) wait timed out, mask: %x, read: %x exp: %x", 605 reg_index, mask, val, reg_val); 606 607 return -ETIME; 608 } 609 610 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index, 611 uint32_t reg_val, uint32_t mask, uint32_t msec_timeout) 612 { 613 uint32_t val; 614 int i; 615 struct amdgpu_device *adev = psp->adev; 616 617 if (psp->adev->no_hw_access) 618 return 0; 619 620 for (i = 0; i < msec_timeout; i++) { 621 val = RREG32(reg_index); 622 if ((val & mask) == reg_val) 623 return 0; 624 msleep(1); 625 } 626 627 return -ETIME; 628 } 629 630 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id) 631 { 632 switch (cmd_id) { 633 case GFX_CMD_ID_LOAD_TA: 634 return "LOAD_TA"; 635 case GFX_CMD_ID_UNLOAD_TA: 636 return "UNLOAD_TA"; 637 case GFX_CMD_ID_INVOKE_CMD: 638 return "INVOKE_CMD"; 639 case GFX_CMD_ID_LOAD_ASD: 640 return "LOAD_ASD"; 641 case GFX_CMD_ID_SETUP_TMR: 642 return "SETUP_TMR"; 643 case GFX_CMD_ID_LOAD_IP_FW: 644 return "LOAD_IP_FW"; 645 case GFX_CMD_ID_DESTROY_TMR: 646 return "DESTROY_TMR"; 647 case 
GFX_CMD_ID_SAVE_RESTORE: 648 return "SAVE_RESTORE_IP_FW"; 649 case GFX_CMD_ID_SETUP_VMR: 650 return "SETUP_VMR"; 651 case GFX_CMD_ID_DESTROY_VMR: 652 return "DESTROY_VMR"; 653 case GFX_CMD_ID_PROG_REG: 654 return "PROG_REG"; 655 case GFX_CMD_ID_GET_FW_ATTESTATION: 656 return "GET_FW_ATTESTATION"; 657 case GFX_CMD_ID_LOAD_TOC: 658 return "ID_LOAD_TOC"; 659 case GFX_CMD_ID_AUTOLOAD_RLC: 660 return "AUTOLOAD_RLC"; 661 case GFX_CMD_ID_BOOT_CFG: 662 return "BOOT_CFG"; 663 case GFX_CMD_ID_CONFIG_SQ_PERFMON: 664 return "CONFIG_SQ_PERFMON"; 665 case GFX_CMD_ID_FB_FW_RESERV_ADDR: 666 return "FB_FW_RESERV_ADDR"; 667 case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR: 668 return "FB_FW_RESERV_EXT_ADDR"; 669 case GFX_CMD_ID_SRIOV_SPATIAL_PART: 670 return "SPATIAL_PARTITION"; 671 case GFX_CMD_ID_FB_NPS_MODE: 672 return "NPS_MODE_CHANGE"; 673 default: 674 return "UNKNOWN CMD"; 675 } 676 } 677 678 static bool psp_err_warn(struct psp_context *psp) 679 { 680 struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem; 681 682 /* This response indicates reg list is already loaded */ 683 if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) && 684 cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW && 685 cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST && 686 cmd->resp.status == TEE_ERROR_CANCEL) 687 return false; 688 689 return true; 690 } 691 692 static int 693 psp_cmd_submit_buf(struct psp_context *psp, 694 struct amdgpu_firmware_info *ucode, 695 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr) 696 { 697 int ret; 698 int index; 699 int timeout = psp->adev->psp_timeout; 700 bool ras_intr = false; 701 bool skip_unsupport = false; 702 703 if (psp->adev->no_hw_access) 704 return 0; 705 706 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); 707 708 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); 709 710 index = atomic_inc_return(&psp->fence_value); 711 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index); 712 if (ret) { 713 atomic_dec(&psp->fence_value); 714 goto exit; 715 } 716 717 amdgpu_device_invalidate_hdp(psp->adev, NULL); 718 while (*((unsigned int *)psp->fence_buf) != index) { 719 if (--timeout == 0) 720 break; 721 /* 722 * Shouldn't wait for timeout when err_event_athub occurs, 723 * because gpu reset thread triggered and lock resource should 724 * be released for psp resume sequence. 725 */ 726 ras_intr = amdgpu_ras_intr_triggered(); 727 if (ras_intr) 728 break; 729 usleep_range(10, 100); 730 amdgpu_device_invalidate_hdp(psp->adev, NULL); 731 } 732 733 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */ 734 skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED || 735 psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev); 736 737 memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp)); 738 739 /* In some cases, psp response status is not 0 even there is no 740 * problem while the command is submitted. Some version of PSP FW 741 * doesn't write 0 to that field. 742 * So here we would like to only print a warning instead of an error 743 * during psp initialization to avoid breaking hw_init and it doesn't 744 * return -EINVAL. 
745 */ 746 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) { 747 if (ucode) 748 dev_warn(psp->adev->dev, 749 "failed to load ucode %s(0x%X) ", 750 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id); 751 if (psp_err_warn(psp)) 752 dev_warn( 753 psp->adev->dev, 754 "psp gfx command %s(0x%X) failed and response status is (0x%X)\n", 755 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), 756 psp->cmd_buf_mem->cmd_id, 757 psp->cmd_buf_mem->resp.status); 758 /* If any firmware (including CAP) load fails under SRIOV, it should 759 * return failure to stop the VF from initializing. 760 * Also return failure in case of timeout 761 */ 762 if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) { 763 ret = -EINVAL; 764 goto exit; 765 } 766 } 767 768 if (ucode) { 769 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo; 770 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi; 771 } 772 773 exit: 774 return ret; 775 } 776 777 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp) 778 { 779 struct psp_gfx_cmd_resp *cmd = psp->cmd; 780 781 mutex_lock(&psp->mutex); 782 783 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); 784 785 return cmd; 786 } 787 788 static void release_psp_cmd_buf(struct psp_context *psp) 789 { 790 mutex_unlock(&psp->mutex); 791 } 792 793 static void psp_prep_tmr_cmd_buf(struct psp_context *psp, 794 struct psp_gfx_cmd_resp *cmd, 795 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo) 796 { 797 struct amdgpu_device *adev = psp->adev; 798 uint32_t size = 0; 799 uint64_t tmr_pa = 0; 800 801 if (tmr_bo) { 802 size = amdgpu_bo_size(tmr_bo); 803 tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo); 804 } 805 806 if (amdgpu_sriov_vf(psp->adev)) 807 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR; 808 else 809 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR; 810 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc); 811 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc); 812 cmd->cmd.cmd_setup_tmr.buf_size = size; 813 cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1; 814 cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa); 815 cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa); 816 } 817 818 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd, 819 uint64_t pri_buf_mc, uint32_t size) 820 { 821 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC; 822 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc); 823 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc); 824 cmd->cmd.cmd_load_toc.toc_size = size; 825 } 826 827 /* Issue LOAD TOC cmd to PSP to part toc and calculate tmr size needed */ 828 static int psp_load_toc(struct psp_context *psp, 829 uint32_t *tmr_size) 830 { 831 int ret; 832 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 833 834 /* Copy toc to psp firmware private buffer */ 835 psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes); 836 837 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes); 838 839 ret = psp_cmd_submit_buf(psp, NULL, cmd, 840 psp->fence_buf_mc_addr); 841 if (!ret) 842 *tmr_size = psp->cmd_buf_mem->resp.tmr_size; 843 844 release_psp_cmd_buf(psp); 845 846 return ret; 847 } 848 849 /* Set up Trusted Memory Region */ 850 static int psp_tmr_init(struct psp_context *psp) 851 { 852 int ret = 0; 853 int tmr_size; 854 void *tmr_buf; 855 void **pptr; 856 857 /* 858 * According to HW engineer, they prefer the TMR address be "naturally 859 * aligned" , e.g. the start address be an integer divide of TMR size. 
860 * 861 * Note: this memory need be reserved till the driver 862 * uninitializes. 863 */ 864 tmr_size = PSP_TMR_SIZE(psp->adev); 865 866 /* For ASICs support RLC autoload, psp will parse the toc 867 * and calculate the total size of TMR needed 868 */ 869 if (!amdgpu_sriov_vf(psp->adev) && 870 psp->toc.start_addr && 871 psp->toc.size_bytes && 872 psp->fw_pri_buf) { 873 ret = psp_load_toc(psp, &tmr_size); 874 if (ret) { 875 dev_err(psp->adev->dev, "Failed to load toc\n"); 876 return ret; 877 } 878 } 879 880 if (!psp->tmr_bo && !psp->boot_time_tmr) { 881 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; 882 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, 883 PSP_TMR_ALIGNMENT, 884 AMDGPU_HAS_VRAM(psp->adev) ? 885 AMDGPU_GEM_DOMAIN_VRAM : 886 AMDGPU_GEM_DOMAIN_GTT, 887 &psp->tmr_bo, &psp->tmr_mc_addr, 888 pptr); 889 } 890 if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo) 891 psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo); 892 893 return ret; 894 } 895 896 static bool psp_skip_tmr(struct psp_context *psp) 897 { 898 switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) { 899 case IP_VERSION(11, 0, 9): 900 case IP_VERSION(11, 0, 7): 901 case IP_VERSION(13, 0, 2): 902 case IP_VERSION(13, 0, 6): 903 case IP_VERSION(13, 0, 10): 904 case IP_VERSION(13, 0, 12): 905 case IP_VERSION(13, 0, 14): 906 return true; 907 default: 908 return false; 909 } 910 } 911 912 static int psp_tmr_load(struct psp_context *psp) 913 { 914 int ret; 915 struct psp_gfx_cmd_resp *cmd; 916 917 /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR. 918 * Already set up by host driver. 919 */ 920 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp)) 921 return 0; 922 923 cmd = acquire_psp_cmd_buf(psp); 924 925 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo); 926 if (psp->tmr_bo) 927 dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n", 928 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr); 929 930 ret = psp_cmd_submit_buf(psp, NULL, cmd, 931 psp->fence_buf_mc_addr); 932 933 release_psp_cmd_buf(psp); 934 935 return ret; 936 } 937 938 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp, 939 struct psp_gfx_cmd_resp *cmd) 940 { 941 if (amdgpu_sriov_vf(psp->adev)) 942 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR; 943 else 944 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR; 945 } 946 947 static int psp_tmr_unload(struct psp_context *psp) 948 { 949 int ret; 950 struct psp_gfx_cmd_resp *cmd; 951 952 /* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV, 953 * as TMR is not loaded at all 954 */ 955 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp)) 956 return 0; 957 958 cmd = acquire_psp_cmd_buf(psp); 959 960 psp_prep_tmr_unload_cmd_buf(psp, cmd); 961 dev_dbg(psp->adev->dev, "free PSP TMR buffer\n"); 962 963 ret = psp_cmd_submit_buf(psp, NULL, cmd, 964 psp->fence_buf_mc_addr); 965 966 release_psp_cmd_buf(psp); 967 968 return ret; 969 } 970 971 static int psp_tmr_terminate(struct psp_context *psp) 972 { 973 return psp_tmr_unload(psp); 974 } 975 976 int psp_get_fw_attestation_records_addr(struct psp_context *psp, 977 uint64_t *output_ptr) 978 { 979 int ret; 980 struct psp_gfx_cmd_resp *cmd; 981 982 if (!output_ptr) 983 return -EINVAL; 984 985 if (amdgpu_sriov_vf(psp->adev)) 986 return 0; 987 988 cmd = acquire_psp_cmd_buf(psp); 989 990 cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION; 991 992 ret = psp_cmd_submit_buf(psp, NULL, cmd, 993 psp->fence_buf_mc_addr); 994 995 if (!ret) { 996 *output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) + 997 
((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32); 998 } 999 1000 release_psp_cmd_buf(psp); 1001 1002 return ret; 1003 } 1004 1005 static int psp_get_fw_reservation_info(struct psp_context *psp, 1006 uint32_t cmd_id, 1007 uint64_t *addr, 1008 uint32_t *size) 1009 { 1010 int ret; 1011 uint32_t status; 1012 struct psp_gfx_cmd_resp *cmd; 1013 1014 cmd = acquire_psp_cmd_buf(psp); 1015 1016 cmd->cmd_id = cmd_id; 1017 1018 ret = psp_cmd_submit_buf(psp, NULL, cmd, 1019 psp->fence_buf_mc_addr); 1020 if (ret) { 1021 release_psp_cmd_buf(psp); 1022 return ret; 1023 } 1024 1025 status = cmd->resp.status; 1026 if (status == PSP_ERR_UNKNOWN_COMMAND) { 1027 release_psp_cmd_buf(psp); 1028 *addr = 0; 1029 *size = 0; 1030 return 0; 1031 } 1032 1033 *addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 | 1034 cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo; 1035 *size = cmd->resp.uresp.fw_reserve_info.reserve_size; 1036 1037 release_psp_cmd_buf(psp); 1038 1039 return 0; 1040 } 1041 1042 int psp_update_fw_reservation(struct psp_context *psp) 1043 { 1044 int ret; 1045 uint64_t reserv_addr, reserv_addr_ext; 1046 uint32_t reserv_size, reserv_size_ext, mp0_ip_ver; 1047 struct amdgpu_device *adev = psp->adev; 1048 1049 mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0); 1050 1051 if (amdgpu_sriov_vf(psp->adev)) 1052 return 0; 1053 1054 switch (mp0_ip_ver) { 1055 case IP_VERSION(14, 0, 2): 1056 if (adev->psp.sos.fw_version < 0x3b0e0d) 1057 return 0; 1058 break; 1059 1060 case IP_VERSION(14, 0, 3): 1061 if (adev->psp.sos.fw_version < 0x3a0e14) 1062 return 0; 1063 break; 1064 1065 default: 1066 return 0; 1067 } 1068 1069 ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size); 1070 if (ret) 1071 return ret; 1072 ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext); 1073 if (ret) 1074 return ret; 1075 1076 if (reserv_addr != adev->gmc.real_vram_size - reserv_size) { 1077 dev_warn(adev->dev, "reserve fw region is not valid!\n"); 1078 return 0; 1079 } 1080 1081 amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL); 1082 1083 reserv_size = roundup(reserv_size, SZ_1M); 1084 1085 ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL); 1086 if (ret) { 1087 dev_err(adev->dev, "reserve fw region failed(%d)!\n", ret); 1088 amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL); 1089 return ret; 1090 } 1091 1092 reserv_size_ext = roundup(reserv_size_ext, SZ_1M); 1093 1094 ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext, 1095 &adev->mman.fw_reserved_memory_extend, NULL); 1096 if (ret) { 1097 dev_err(adev->dev, "reserve extend fw region failed(%d)!\n", ret); 1098 amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL); 1099 return ret; 1100 } 1101 1102 return 0; 1103 } 1104 1105 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg) 1106 { 1107 struct psp_context *psp = &adev->psp; 1108 struct psp_gfx_cmd_resp *cmd; 1109 int ret; 1110 1111 if (amdgpu_sriov_vf(adev)) 1112 return 0; 1113 1114 cmd = acquire_psp_cmd_buf(psp); 1115 1116 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG; 1117 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET; 1118 1119 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1120 if (!ret) { 1121 *boot_cfg = 1122 (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 
1 : 0; 1123 } 1124 1125 release_psp_cmd_buf(psp); 1126 1127 return ret; 1128 } 1129 1130 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg) 1131 { 1132 int ret; 1133 struct psp_context *psp = &adev->psp; 1134 struct psp_gfx_cmd_resp *cmd; 1135 1136 if (amdgpu_sriov_vf(adev)) 1137 return 0; 1138 1139 cmd = acquire_psp_cmd_buf(psp); 1140 1141 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG; 1142 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET; 1143 cmd->cmd.boot_cfg.boot_config = boot_cfg; 1144 cmd->cmd.boot_cfg.boot_config_valid = boot_cfg; 1145 1146 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1147 1148 release_psp_cmd_buf(psp); 1149 1150 return ret; 1151 } 1152 1153 static int psp_rl_load(struct amdgpu_device *adev) 1154 { 1155 int ret; 1156 struct psp_context *psp = &adev->psp; 1157 struct psp_gfx_cmd_resp *cmd; 1158 1159 if (!is_psp_fw_valid(psp->rl)) 1160 return 0; 1161 1162 cmd = acquire_psp_cmd_buf(psp); 1163 1164 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 1165 memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes); 1166 1167 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 1168 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr); 1169 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr); 1170 cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes; 1171 cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST; 1172 1173 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1174 1175 release_psp_cmd_buf(psp); 1176 1177 return ret; 1178 } 1179 1180 int psp_memory_partition(struct psp_context *psp, int mode) 1181 { 1182 struct psp_gfx_cmd_resp *cmd; 1183 int ret; 1184 1185 if (amdgpu_sriov_vf(psp->adev)) 1186 return 0; 1187 1188 cmd = acquire_psp_cmd_buf(psp); 1189 1190 cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE; 1191 cmd->cmd.cmd_memory_part.mode = mode; 1192 1193 dev_info(psp->adev->dev, 1194 "Requesting %d memory partition change through PSP", mode); 1195 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1196 if (ret) 1197 dev_err(psp->adev->dev, 1198 "PSP request failed to change to NPS%d mode\n", mode); 1199 1200 release_psp_cmd_buf(psp); 1201 1202 return ret; 1203 } 1204 1205 int psp_spatial_partition(struct psp_context *psp, int mode) 1206 { 1207 struct psp_gfx_cmd_resp *cmd; 1208 int ret; 1209 1210 if (amdgpu_sriov_vf(psp->adev)) 1211 return 0; 1212 1213 cmd = acquire_psp_cmd_buf(psp); 1214 1215 cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART; 1216 cmd->cmd.cmd_spatial_part.mode = mode; 1217 1218 dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode); 1219 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1220 1221 release_psp_cmd_buf(psp); 1222 1223 return ret; 1224 } 1225 1226 static int psp_asd_initialize(struct psp_context *psp) 1227 { 1228 int ret; 1229 1230 /* If PSP version doesn't match ASD version, asd loading will be failed. 1231 * add workaround to bypass it for sriov now. 
1232 * TODO: add version check to make it common 1233 */ 1234 if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes) 1235 return 0; 1236 1237 /* bypass asd if display hardware is not available */ 1238 if (!amdgpu_device_has_display_hardware(psp->adev) && 1239 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10)) 1240 return 0; 1241 1242 psp->asd_context.mem_context.shared_mc_addr = 0; 1243 psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE; 1244 psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD; 1245 1246 ret = psp_ta_load(psp, &psp->asd_context); 1247 if (!ret) 1248 psp->asd_context.initialized = true; 1249 1250 return ret; 1251 } 1252 1253 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1254 uint32_t session_id) 1255 { 1256 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; 1257 cmd->cmd.cmd_unload_ta.session_id = session_id; 1258 } 1259 1260 int psp_ta_unload(struct psp_context *psp, struct ta_context *context) 1261 { 1262 int ret; 1263 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 1264 1265 psp_prep_ta_unload_cmd_buf(cmd, context->session_id); 1266 1267 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1268 1269 context->resp_status = cmd->resp.status; 1270 1271 release_psp_cmd_buf(psp); 1272 1273 return ret; 1274 } 1275 1276 static int psp_asd_terminate(struct psp_context *psp) 1277 { 1278 int ret; 1279 1280 if (amdgpu_sriov_vf(psp->adev)) 1281 return 0; 1282 1283 if (!psp->asd_context.initialized) 1284 return 0; 1285 1286 ret = psp_ta_unload(psp, &psp->asd_context); 1287 if (!ret) 1288 psp->asd_context.initialized = false; 1289 1290 return ret; 1291 } 1292 1293 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1294 uint32_t id, uint32_t value) 1295 { 1296 cmd->cmd_id = GFX_CMD_ID_PROG_REG; 1297 cmd->cmd.cmd_setup_reg_prog.reg_value = value; 1298 cmd->cmd.cmd_setup_reg_prog.reg_id = id; 1299 } 1300 1301 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg, 1302 uint32_t value) 1303 { 1304 struct psp_gfx_cmd_resp *cmd; 1305 int ret = 0; 1306 1307 if (reg >= PSP_REG_LAST) 1308 return -EINVAL; 1309 1310 cmd = acquire_psp_cmd_buf(psp); 1311 1312 psp_prep_reg_prog_cmd_buf(cmd, reg, value); 1313 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1314 if (ret) 1315 dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg); 1316 1317 release_psp_cmd_buf(psp); 1318 1319 return ret; 1320 } 1321 1322 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1323 uint64_t ta_bin_mc, 1324 struct ta_context *context) 1325 { 1326 cmd->cmd_id = context->ta_load_type; 1327 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc); 1328 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc); 1329 cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes; 1330 1331 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 1332 lower_32_bits(context->mem_context.shared_mc_addr); 1333 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 1334 upper_32_bits(context->mem_context.shared_mc_addr); 1335 cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size; 1336 } 1337 1338 int psp_ta_init_shared_buf(struct psp_context *psp, 1339 struct ta_mem_context *mem_ctx) 1340 { 1341 /* 1342 * Allocate 16k memory aligned to 4k from Frame Buffer (local 1343 * physical) for ta to host memory 1344 */ 1345 return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size, 1346 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | 1347 AMDGPU_GEM_DOMAIN_GTT, 1348 
&mem_ctx->shared_bo, 1349 &mem_ctx->shared_mc_addr, 1350 &mem_ctx->shared_buf); 1351 } 1352 1353 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1354 uint32_t ta_cmd_id, 1355 uint32_t session_id) 1356 { 1357 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; 1358 cmd->cmd.cmd_invoke_cmd.session_id = session_id; 1359 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; 1360 } 1361 1362 int psp_ta_invoke(struct psp_context *psp, 1363 uint32_t ta_cmd_id, 1364 struct ta_context *context) 1365 { 1366 int ret; 1367 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 1368 1369 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id); 1370 1371 ret = psp_cmd_submit_buf(psp, NULL, cmd, 1372 psp->fence_buf_mc_addr); 1373 1374 context->resp_status = cmd->resp.status; 1375 1376 release_psp_cmd_buf(psp); 1377 1378 return ret; 1379 } 1380 1381 int psp_ta_load(struct psp_context *psp, struct ta_context *context) 1382 { 1383 int ret; 1384 struct psp_gfx_cmd_resp *cmd; 1385 1386 cmd = acquire_psp_cmd_buf(psp); 1387 1388 psp_copy_fw(psp, context->bin_desc.start_addr, 1389 context->bin_desc.size_bytes); 1390 1391 if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && 1392 context->mem_context.shared_bo) 1393 context->mem_context.shared_mc_addr = 1394 amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo); 1395 1396 psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context); 1397 1398 ret = psp_cmd_submit_buf(psp, NULL, cmd, 1399 psp->fence_buf_mc_addr); 1400 1401 context->resp_status = cmd->resp.status; 1402 1403 if (!ret) 1404 context->session_id = cmd->resp.session_id; 1405 1406 release_psp_cmd_buf(psp); 1407 1408 return ret; 1409 } 1410 1411 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1412 { 1413 return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context); 1414 } 1415 1416 int psp_xgmi_terminate(struct psp_context *psp) 1417 { 1418 int ret; 1419 struct amdgpu_device *adev = psp->adev; 1420 1421 /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */ 1422 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) || 1423 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) && 1424 adev->gmc.xgmi.connected_to_cpu)) 1425 return 0; 1426 1427 if (!psp->xgmi_context.context.initialized) 1428 return 0; 1429 1430 ret = psp_ta_unload(psp, &psp->xgmi_context.context); 1431 1432 psp->xgmi_context.context.initialized = false; 1433 1434 return ret; 1435 } 1436 1437 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta) 1438 { 1439 struct ta_xgmi_shared_memory *xgmi_cmd; 1440 int ret; 1441 1442 if (!psp->ta_fw || 1443 !psp->xgmi_context.context.bin_desc.size_bytes || 1444 !psp->xgmi_context.context.bin_desc.start_addr) 1445 return -ENOENT; 1446 1447 if (!load_ta) 1448 goto invoke; 1449 1450 psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE; 1451 psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1452 1453 if (!psp->xgmi_context.context.mem_context.shared_buf) { 1454 ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context); 1455 if (ret) 1456 return ret; 1457 } 1458 1459 /* Load XGMI TA */ 1460 ret = psp_ta_load(psp, &psp->xgmi_context.context); 1461 if (!ret) 1462 psp->xgmi_context.context.initialized = true; 1463 else 1464 return ret; 1465 1466 invoke: 1467 /* Initialize XGMI session */ 1468 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf); 1469 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1470 
xgmi_cmd->flag_extend_link_record = set_extended_data; 1471 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE; 1472 1473 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1474 /* note down the capbility flag for XGMI TA */ 1475 psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag; 1476 1477 return ret; 1478 } 1479 1480 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id) 1481 { 1482 struct ta_xgmi_shared_memory *xgmi_cmd; 1483 int ret; 1484 1485 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1486 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1487 1488 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID; 1489 1490 /* Invoke xgmi ta to get hive id */ 1491 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1492 if (ret) 1493 return ret; 1494 1495 *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id; 1496 1497 return 0; 1498 } 1499 1500 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id) 1501 { 1502 struct ta_xgmi_shared_memory *xgmi_cmd; 1503 int ret; 1504 1505 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1506 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1507 1508 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID; 1509 1510 /* Invoke xgmi ta to get the node id */ 1511 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1512 if (ret) 1513 return ret; 1514 1515 *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id; 1516 1517 return 0; 1518 } 1519 1520 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp) 1521 { 1522 return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == 1523 IP_VERSION(13, 0, 2) && 1524 psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) || 1525 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= 1526 IP_VERSION(13, 0, 6); 1527 } 1528 1529 /* 1530 * Chips that support extended topology information require the driver to 1531 * reflect topology information in the opposite direction. This is 1532 * because the TA has already exceeded its link record limit and if the 1533 * TA holds bi-directional information, the driver would have to do 1534 * multiple fetches instead of just two. 1535 */ 1536 static void psp_xgmi_reflect_topology_info(struct psp_context *psp, 1537 struct psp_xgmi_node_info node_info) 1538 { 1539 struct amdgpu_device *mirror_adev; 1540 struct amdgpu_hive_info *hive; 1541 uint64_t src_node_id = psp->adev->gmc.xgmi.node_id; 1542 uint64_t dst_node_id = node_info.node_id; 1543 uint8_t dst_num_hops = node_info.num_hops; 1544 uint8_t dst_num_links = node_info.num_links; 1545 1546 hive = amdgpu_get_xgmi_hive(psp->adev); 1547 if (WARN_ON(!hive)) 1548 return; 1549 1550 list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) { 1551 struct psp_xgmi_topology_info *mirror_top_info; 1552 int j; 1553 1554 if (mirror_adev->gmc.xgmi.node_id != dst_node_id) 1555 continue; 1556 1557 mirror_top_info = &mirror_adev->psp.xgmi_context.top_info; 1558 for (j = 0; j < mirror_top_info->num_nodes; j++) { 1559 if (mirror_top_info->nodes[j].node_id != src_node_id) 1560 continue; 1561 1562 mirror_top_info->nodes[j].num_hops = dst_num_hops; 1563 /* 1564 * prevent 0 num_links value re-reflection since reflection 1565 * criteria is based on num_hops (direct or indirect). 
1566 * 1567 */ 1568 if (dst_num_links) 1569 mirror_top_info->nodes[j].num_links = dst_num_links; 1570 1571 break; 1572 } 1573 1574 break; 1575 } 1576 1577 amdgpu_put_xgmi_hive(hive); 1578 } 1579 1580 int psp_xgmi_get_topology_info(struct psp_context *psp, 1581 int number_devices, 1582 struct psp_xgmi_topology_info *topology, 1583 bool get_extended_data) 1584 { 1585 struct ta_xgmi_shared_memory *xgmi_cmd; 1586 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; 1587 struct ta_xgmi_cmd_get_topology_info_output *topology_info_output; 1588 int i; 1589 int ret; 1590 1591 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) 1592 return -EINVAL; 1593 1594 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1595 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1596 xgmi_cmd->flag_extend_link_record = get_extended_data; 1597 1598 /* Fill in the shared memory with topology information as input */ 1599 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; 1600 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO; 1601 topology_info_input->num_nodes = number_devices; 1602 1603 for (i = 0; i < topology_info_input->num_nodes; i++) { 1604 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; 1605 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; 1606 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled; 1607 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; 1608 } 1609 1610 /* Invoke xgmi ta to get the topology information */ 1611 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO); 1612 if (ret) 1613 return ret; 1614 1615 /* Read the output topology information from the shared memory */ 1616 topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info; 1617 topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes; 1618 for (i = 0; i < topology->num_nodes; i++) { 1619 /* extended data will either be 0 or equal to non-extended data */ 1620 if (topology_info_output->nodes[i].num_hops) 1621 topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops; 1622 1623 /* non-extended data gets everything here so no need to update */ 1624 if (!get_extended_data) { 1625 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id; 1626 topology->nodes[i].is_sharing_enabled = 1627 topology_info_output->nodes[i].is_sharing_enabled; 1628 topology->nodes[i].sdma_engine = 1629 topology_info_output->nodes[i].sdma_engine; 1630 } 1631 1632 } 1633 1634 /* Invoke xgmi ta again to get the link information */ 1635 if (psp_xgmi_peer_link_info_supported(psp)) { 1636 struct ta_xgmi_cmd_get_peer_link_info *link_info_output; 1637 struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output; 1638 bool requires_reflection = 1639 (psp->xgmi_context.supports_extended_data && 1640 get_extended_data) || 1641 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == 1642 IP_VERSION(13, 0, 6) || 1643 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == 1644 IP_VERSION(13, 0, 14); 1645 bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 : 1646 psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG; 1647 1648 /* popluate the shared output buffer rather than the cmd input buffer 1649 * with node_ids as the input for GET_PEER_LINKS command execution. 1650 * This is required for GET_PEER_LINKS per xgmi ta implementation. 1651 * The same requirement for GET_EXTEND_PEER_LINKS command. 
1652 */ 1653 if (ta_port_num_support) { 1654 link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info; 1655 1656 for (i = 0; i < topology->num_nodes; i++) 1657 link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id; 1658 1659 link_extend_info_output->num_nodes = topology->num_nodes; 1660 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS; 1661 } else { 1662 link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info; 1663 1664 for (i = 0; i < topology->num_nodes; i++) 1665 link_info_output->nodes[i].node_id = topology->nodes[i].node_id; 1666 1667 link_info_output->num_nodes = topology->num_nodes; 1668 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS; 1669 } 1670 1671 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1672 if (ret) 1673 return ret; 1674 1675 for (i = 0; i < topology->num_nodes; i++) { 1676 uint8_t node_num_links = ta_port_num_support ? 1677 link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links; 1678 /* accumulate num_links on extended data */ 1679 if (get_extended_data) { 1680 topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links; 1681 } else { 1682 topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ? 1683 topology->nodes[i].num_links : node_num_links; 1684 } 1685 /* popluate the connected port num info if supported and available */ 1686 if (ta_port_num_support && topology->nodes[i].num_links) { 1687 memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num, 1688 sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM); 1689 } 1690 1691 /* reflect the topology information for bi-directionality */ 1692 if (requires_reflection && topology->nodes[i].num_hops) 1693 psp_xgmi_reflect_topology_info(psp, topology->nodes[i]); 1694 } 1695 } 1696 1697 return 0; 1698 } 1699 1700 int psp_xgmi_set_topology_info(struct psp_context *psp, 1701 int number_devices, 1702 struct psp_xgmi_topology_info *topology) 1703 { 1704 struct ta_xgmi_shared_memory *xgmi_cmd; 1705 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; 1706 int i; 1707 1708 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) 1709 return -EINVAL; 1710 1711 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1712 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1713 1714 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; 1715 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO; 1716 topology_info_input->num_nodes = number_devices; 1717 1718 for (i = 0; i < topology_info_input->num_nodes; i++) { 1719 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; 1720 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; 1721 topology_info_input->nodes[i].is_sharing_enabled = 1; 1722 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; 1723 } 1724 1725 /* Invoke xgmi ta to set topology information */ 1726 return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO); 1727 } 1728 1729 // ras begin 1730 static void psp_ras_ta_check_status(struct psp_context *psp) 1731 { 1732 struct ta_ras_shared_memory *ras_cmd = 1733 (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1734 1735 switch (ras_cmd->ras_status) { 1736 case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP: 1737 dev_warn(psp->adev->dev, 1738 "RAS WARNING: cmd failed due to unsupported ip\n"); 1739 break; 1740 case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ: 1741 
dev_warn(psp->adev->dev, 1742 "RAS WARNING: cmd failed due to unsupported error injection\n"); 1743 break; 1744 case TA_RAS_STATUS__SUCCESS: 1745 break; 1746 case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED: 1747 if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR) 1748 dev_warn(psp->adev->dev, 1749 "RAS WARNING: Inject error to critical region is not allowed\n"); 1750 break; 1751 default: 1752 dev_warn(psp->adev->dev, 1753 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status); 1754 break; 1755 } 1756 } 1757 1758 static int psp_ras_send_cmd(struct psp_context *psp, 1759 enum ras_command cmd_id, void *in, void *out) 1760 { 1761 struct ta_ras_shared_memory *ras_cmd; 1762 uint32_t cmd = cmd_id; 1763 int ret = 0; 1764 1765 if (!in) 1766 return -EINVAL; 1767 1768 mutex_lock(&psp->ras_context.mutex); 1769 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1770 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1771 1772 switch (cmd) { 1773 case TA_RAS_COMMAND__ENABLE_FEATURES: 1774 case TA_RAS_COMMAND__DISABLE_FEATURES: 1775 memcpy(&ras_cmd->ras_in_message, 1776 in, sizeof(ras_cmd->ras_in_message)); 1777 break; 1778 case TA_RAS_COMMAND__TRIGGER_ERROR: 1779 memcpy(&ras_cmd->ras_in_message.trigger_error, 1780 in, sizeof(ras_cmd->ras_in_message.trigger_error)); 1781 break; 1782 case TA_RAS_COMMAND__QUERY_ADDRESS: 1783 memcpy(&ras_cmd->ras_in_message.address, 1784 in, sizeof(ras_cmd->ras_in_message.address)); 1785 break; 1786 default: 1787 dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd); 1788 ret = -EINVAL; 1789 goto err_out; 1790 } 1791 1792 ras_cmd->cmd_id = cmd; 1793 ret = psp_ras_invoke(psp, ras_cmd->cmd_id); 1794 1795 switch (cmd) { 1796 case TA_RAS_COMMAND__TRIGGER_ERROR: 1797 if (!ret && out) 1798 memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status)); 1799 break; 1800 case TA_RAS_COMMAND__QUERY_ADDRESS: 1801 if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status) 1802 ret = -EINVAL; 1803 else if (out) 1804 memcpy(out, 1805 &ras_cmd->ras_out_message.address, 1806 sizeof(ras_cmd->ras_out_message.address)); 1807 break; 1808 default: 1809 break; 1810 } 1811 1812 err_out: 1813 mutex_unlock(&psp->ras_context.mutex); 1814 1815 return ret; 1816 } 1817 1818 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1819 { 1820 struct ta_ras_shared_memory *ras_cmd; 1821 int ret; 1822 1823 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1824 1825 /* 1826 * TODO: bypass the loading in sriov for now 1827 */ 1828 if (amdgpu_sriov_vf(psp->adev)) 1829 return 0; 1830 1831 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context); 1832 1833 if (amdgpu_ras_intr_triggered()) 1834 return ret; 1835 1836 if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) { 1837 dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n"); 1838 return -EINVAL; 1839 } 1840 1841 if (!ret) { 1842 if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) { 1843 dev_warn(psp->adev->dev, "ECC switch disabled\n"); 1844 1845 ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE; 1846 } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag) 1847 dev_warn(psp->adev->dev, 1848 "RAS internal register access blocked\n"); 1849 1850 psp_ras_ta_check_status(psp); 1851 } 1852 1853 return ret; 1854 } 1855 1856 int psp_ras_enable_features(struct psp_context *psp, 1857 union ta_ras_cmd_input *info, bool enable) 1858 { 1859 enum ras_command cmd_id; 1860 int ret; 1861 1862 if (!psp->ras_context.context.initialized || 
!info) 1863 return -EINVAL; 1864 1865 cmd_id = enable ? 1866 TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES; 1867 ret = psp_ras_send_cmd(psp, cmd_id, info, NULL); 1868 if (ret) 1869 return -EINVAL; 1870 1871 return 0; 1872 } 1873 1874 int psp_ras_terminate(struct psp_context *psp) 1875 { 1876 int ret; 1877 1878 /* 1879 * TODO: bypass the terminate in sriov for now 1880 */ 1881 if (amdgpu_sriov_vf(psp->adev)) 1882 return 0; 1883 1884 if (!psp->ras_context.context.initialized) 1885 return 0; 1886 1887 ret = psp_ta_unload(psp, &psp->ras_context.context); 1888 1889 psp->ras_context.context.initialized = false; 1890 1891 mutex_destroy(&psp->ras_context.mutex); 1892 1893 return ret; 1894 } 1895 1896 int psp_ras_initialize(struct psp_context *psp) 1897 { 1898 int ret; 1899 uint32_t boot_cfg = 0xFF; 1900 struct amdgpu_device *adev = psp->adev; 1901 struct ta_ras_shared_memory *ras_cmd; 1902 1903 /* 1904 * TODO: bypass the initialize in sriov for now 1905 */ 1906 if (amdgpu_sriov_vf(adev)) 1907 return 0; 1908 1909 if (!adev->psp.ras_context.context.bin_desc.size_bytes || 1910 !adev->psp.ras_context.context.bin_desc.start_addr) { 1911 dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n"); 1912 return 0; 1913 } 1914 1915 if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) { 1916 /* query GECC enablement status from boot config 1917 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled 1918 */ 1919 ret = psp_boot_config_get(adev, &boot_cfg); 1920 if (ret) 1921 dev_warn(adev->dev, "PSP get boot config failed\n"); 1922 1923 if (boot_cfg == 1 && !adev->ras_default_ecc_enabled && 1924 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) { 1925 dev_warn(adev->dev, "GECC is currently enabled, which may affect performance\n"); 1926 dev_warn(adev->dev, 1927 "To disable GECC, please reboot the system and load the amdgpu driver with the parameter amdgpu_ras_enable=0\n"); 1928 } else { 1929 if ((adev->ras_default_ecc_enabled || amdgpu_ras_enable == 1) && 1930 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) { 1931 if (boot_cfg == 1) { 1932 dev_info(adev->dev, "GECC is enabled\n"); 1933 } else { 1934 /* enable GECC in next boot cycle if it is disabled 1935 * in boot config, or force enable GECC if failed to 1936 * get boot configuration 1937 */ 1938 ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC); 1939 if (ret) 1940 dev_warn(adev->dev, "PSP set boot config failed\n"); 1941 else 1942 dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n"); 1943 } 1944 } else { 1945 if (!boot_cfg) { 1946 if (!adev->ras_default_ecc_enabled && 1947 amdgpu_ras_enable != 1 && 1948 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) 1949 dev_warn(adev->dev, "GECC is disabled, set amdgpu_ras_enable=1 to enable GECC in next boot cycle if needed\n"); 1950 else 1951 dev_info(adev->dev, "GECC is disabled\n"); 1952 } else { 1953 /* disable GECC in next boot cycle if ras is 1954 * disabled by module parameter amdgpu_ras_enable 1955 * and/or amdgpu_ras_mask, or boot_config_get call 1956 * is failed 1957 */ 1958 ret = psp_boot_config_set(adev, 0); 1959 if (ret) 1960 dev_warn(adev->dev, "PSP set boot config failed\n"); 1961 else 1962 dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n"); 1963 } 1964 } 1965 } 1966 } 1967 1968 psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE; 1969 psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1970 1971 if 
(!psp->ras_context.context.mem_context.shared_buf) { 1972 ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context); 1973 if (ret) 1974 return ret; 1975 } 1976 1977 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1978 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1979 1980 if (amdgpu_ras_is_poison_mode_supported(adev)) 1981 ras_cmd->ras_in_message.init_flags.poison_mode_en = 1; 1982 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) 1983 ras_cmd->ras_in_message.init_flags.dgpu_mode = 1; 1984 ras_cmd->ras_in_message.init_flags.xcc_mask = 1985 adev->gfx.xcc_mask; 1986 ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2; 1987 if (adev->gmc.gmc_funcs->query_mem_partition_mode) 1988 ras_cmd->ras_in_message.init_flags.nps_mode = 1989 adev->gmc.gmc_funcs->query_mem_partition_mode(adev); 1990 ras_cmd->ras_in_message.init_flags.active_umc_mask = adev->umc.active_mask; 1991 1992 ret = psp_ta_load(psp, &psp->ras_context.context); 1993 1994 if (!ret && !ras_cmd->ras_status) { 1995 psp->ras_context.context.initialized = true; 1996 mutex_init(&psp->ras_context.mutex); 1997 } else { 1998 if (ras_cmd->ras_status) 1999 dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status); 2000 2001 /* failed to load RAS TA */ 2002 psp->ras_context.context.initialized = false; 2003 } 2004 2005 return ret; 2006 } 2007 2008 int psp_ras_trigger_error(struct psp_context *psp, 2009 struct ta_ras_trigger_error_input *info, uint32_t instance_mask) 2010 { 2011 struct amdgpu_device *adev = psp->adev; 2012 int ret; 2013 uint32_t dev_mask; 2014 uint32_t ras_status = 0; 2015 2016 if (!psp->ras_context.context.initialized || !info) 2017 return -EINVAL; 2018 2019 switch (info->block_id) { 2020 case TA_RAS_BLOCK__GFX: 2021 dev_mask = GET_MASK(GC, instance_mask); 2022 break; 2023 case TA_RAS_BLOCK__SDMA: 2024 dev_mask = GET_MASK(SDMA0, instance_mask); 2025 break; 2026 case TA_RAS_BLOCK__VCN: 2027 case TA_RAS_BLOCK__JPEG: 2028 dev_mask = GET_MASK(VCN, instance_mask); 2029 break; 2030 default: 2031 dev_mask = instance_mask; 2032 break; 2033 } 2034 2035 /* reuse sub_block_index for backward compatibility */ 2036 dev_mask <<= AMDGPU_RAS_INST_SHIFT; 2037 dev_mask &= AMDGPU_RAS_INST_MASK; 2038 info->sub_block_index |= dev_mask; 2039 2040 ret = psp_ras_send_cmd(psp, 2041 TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status); 2042 if (ret) 2043 return -EINVAL; 2044 2045 /* If err_event_athub occurs, the error injection was successful; however, 2046 * the return status from the TA is no longer reliable 2047 */ 2048 if (amdgpu_ras_intr_triggered()) 2049 return 0; 2050 2051 if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED) 2052 return -EACCES; 2053 else if (ras_status) 2054 return -EINVAL; 2055 2056 return 0; 2057 } 2058 2059 int psp_ras_query_address(struct psp_context *psp, 2060 struct ta_ras_query_address_input *addr_in, 2061 struct ta_ras_query_address_output *addr_out) 2062 { 2063 int ret; 2064 2065 if (!psp->ras_context.context.initialized || 2066 !addr_in || !addr_out) 2067 return -EINVAL; 2068 2069 ret = psp_ras_send_cmd(psp, 2070 TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out); 2071 2072 return ret; 2073 } 2074 // ras end 2075 2076 // HDCP start 2077 static int psp_hdcp_initialize(struct psp_context *psp) 2078 { 2079 int ret; 2080 2081 /* 2082 * TODO: bypass the initialize in sriov for now 2083 */ 2084 if (amdgpu_sriov_vf(psp->adev)) 2085 return 0; 2086 2087 /* bypass hdcp initialization if dmu is harvested */ 2088 if
(!amdgpu_device_has_display_hardware(psp->adev)) 2089 return 0; 2090 2091 if (!psp->hdcp_context.context.bin_desc.size_bytes || 2092 !psp->hdcp_context.context.bin_desc.start_addr) { 2093 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n"); 2094 return 0; 2095 } 2096 2097 psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE; 2098 psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2099 2100 if (!psp->hdcp_context.context.mem_context.shared_buf) { 2101 ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context); 2102 if (ret) 2103 return ret; 2104 } 2105 2106 ret = psp_ta_load(psp, &psp->hdcp_context.context); 2107 if (!ret) { 2108 psp->hdcp_context.context.initialized = true; 2109 mutex_init(&psp->hdcp_context.mutex); 2110 } 2111 2112 return ret; 2113 } 2114 2115 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2116 { 2117 /* 2118 * TODO: bypass the loading in sriov for now 2119 */ 2120 if (amdgpu_sriov_vf(psp->adev)) 2121 return 0; 2122 2123 if (!psp->hdcp_context.context.initialized) 2124 return 0; 2125 2126 return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context); 2127 } 2128 2129 static int psp_hdcp_terminate(struct psp_context *psp) 2130 { 2131 int ret; 2132 2133 /* 2134 * TODO: bypass the terminate in sriov for now 2135 */ 2136 if (amdgpu_sriov_vf(psp->adev)) 2137 return 0; 2138 2139 if (!psp->hdcp_context.context.initialized) 2140 return 0; 2141 2142 ret = psp_ta_unload(psp, &psp->hdcp_context.context); 2143 2144 psp->hdcp_context.context.initialized = false; 2145 2146 return ret; 2147 } 2148 // HDCP end 2149 2150 // DTM start 2151 static int psp_dtm_initialize(struct psp_context *psp) 2152 { 2153 int ret; 2154 2155 /* 2156 * TODO: bypass the initialize in sriov for now 2157 */ 2158 if (amdgpu_sriov_vf(psp->adev)) 2159 return 0; 2160 2161 /* bypass dtm initialization if dmu is harvested */ 2162 if (!amdgpu_device_has_display_hardware(psp->adev)) 2163 return 0; 2164 2165 if (!psp->dtm_context.context.bin_desc.size_bytes || 2166 !psp->dtm_context.context.bin_desc.start_addr) { 2167 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n"); 2168 return 0; 2169 } 2170 2171 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE; 2172 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2173 2174 if (!psp->dtm_context.context.mem_context.shared_buf) { 2175 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context); 2176 if (ret) 2177 return ret; 2178 } 2179 2180 ret = psp_ta_load(psp, &psp->dtm_context.context); 2181 if (!ret) { 2182 psp->dtm_context.context.initialized = true; 2183 mutex_init(&psp->dtm_context.mutex); 2184 } 2185 2186 return ret; 2187 } 2188 2189 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2190 { 2191 /* 2192 * TODO: bypass the loading in sriov for now 2193 */ 2194 if (amdgpu_sriov_vf(psp->adev)) 2195 return 0; 2196 2197 if (!psp->dtm_context.context.initialized) 2198 return 0; 2199 2200 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context); 2201 } 2202 2203 static int psp_dtm_terminate(struct psp_context *psp) 2204 { 2205 int ret; 2206 2207 /* 2208 * TODO: bypass the terminate in sriov for now 2209 */ 2210 if (amdgpu_sriov_vf(psp->adev)) 2211 return 0; 2212 2213 if (!psp->dtm_context.context.initialized) 2214 return 0; 2215 2216 ret = psp_ta_unload(psp, &psp->dtm_context.context); 2217 2218 psp->dtm_context.context.initialized = false; 2219 2220 return ret; 2221 } 2222 // DTM end 
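/*
 * The TA helpers in this file (HDCP, DTM, RAP, securedisplay) follow roughly
 * the same lifecycle:
 *
 *   psp_<ta>_initialize(): allocate the shared buffer if needed, then
 *                          psp_ta_load() the TA and mark the context
 *                          initialized;
 *   psp_<ta>_invoke():     submit a TA command through psp_ta_invoke()
 *                          using that shared buffer;
 *   psp_<ta>_terminate():  psp_ta_unload() the TA and clear ->initialized.
 *
 * Initialization is skipped under SR-IOV for now, and the display-related
 * TAs additionally bail out when display hardware is harvested.
 */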
2223 2224 // RAP start 2225 static int psp_rap_initialize(struct psp_context *psp) 2226 { 2227 int ret; 2228 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS; 2229 2230 /* 2231 * TODO: bypass the initialize in sriov for now 2232 */ 2233 if (amdgpu_sriov_vf(psp->adev)) 2234 return 0; 2235 2236 if (!psp->rap_context.context.bin_desc.size_bytes || 2237 !psp->rap_context.context.bin_desc.start_addr) { 2238 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n"); 2239 return 0; 2240 } 2241 2242 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE; 2243 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2244 2245 if (!psp->rap_context.context.mem_context.shared_buf) { 2246 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context); 2247 if (ret) 2248 return ret; 2249 } 2250 2251 ret = psp_ta_load(psp, &psp->rap_context.context); 2252 if (!ret) { 2253 psp->rap_context.context.initialized = true; 2254 mutex_init(&psp->rap_context.mutex); 2255 } else 2256 return ret; 2257 2258 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status); 2259 if (ret || status != TA_RAP_STATUS__SUCCESS) { 2260 psp_rap_terminate(psp); 2261 /* free rap shared memory */ 2262 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); 2263 2264 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n", 2265 ret, status); 2266 2267 return ret; 2268 } 2269 2270 return 0; 2271 } 2272 2273 static int psp_rap_terminate(struct psp_context *psp) 2274 { 2275 int ret; 2276 2277 if (!psp->rap_context.context.initialized) 2278 return 0; 2279 2280 ret = psp_ta_unload(psp, &psp->rap_context.context); 2281 2282 psp->rap_context.context.initialized = false; 2283 2284 return ret; 2285 } 2286 2287 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status) 2288 { 2289 struct ta_rap_shared_memory *rap_cmd; 2290 int ret = 0; 2291 2292 if (!psp->rap_context.context.initialized) 2293 return 0; 2294 2295 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE && 2296 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0) 2297 return -EINVAL; 2298 2299 mutex_lock(&psp->rap_context.mutex); 2300 2301 rap_cmd = (struct ta_rap_shared_memory *) 2302 psp->rap_context.context.mem_context.shared_buf; 2303 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory)); 2304 2305 rap_cmd->cmd_id = ta_cmd_id; 2306 rap_cmd->validation_method_id = METHOD_A; 2307 2308 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context); 2309 if (ret) 2310 goto out_unlock; 2311 2312 if (status) 2313 *status = rap_cmd->rap_status; 2314 2315 out_unlock: 2316 mutex_unlock(&psp->rap_context.mutex); 2317 2318 return ret; 2319 } 2320 // RAP end 2321 2322 /* securedisplay start */ 2323 static int psp_securedisplay_initialize(struct psp_context *psp) 2324 { 2325 int ret; 2326 struct ta_securedisplay_cmd *securedisplay_cmd; 2327 2328 /* 2329 * TODO: bypass the initialize in sriov for now 2330 */ 2331 if (amdgpu_sriov_vf(psp->adev)) 2332 return 0; 2333 2334 /* bypass securedisplay initialization if dmu is harvested */ 2335 if (!amdgpu_device_has_display_hardware(psp->adev)) 2336 return 0; 2337 2338 if (!psp->securedisplay_context.context.bin_desc.size_bytes || 2339 !psp->securedisplay_context.context.bin_desc.start_addr) { 2340 dev_info(psp->adev->dev, 2341 "SECUREDISPLAY: optional securedisplay ta ucode is not available\n"); 2342 return 0; 2343 } 2344 2345 psp->securedisplay_context.context.mem_context.shared_mem_size = 2346 PSP_SECUREDISPLAY_SHARED_MEM_SIZE; 2347 
psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2348 2349 if (!psp->securedisplay_context.context.initialized) { 2350 ret = psp_ta_init_shared_buf(psp, 2351 &psp->securedisplay_context.context.mem_context); 2352 if (ret) 2353 return ret; 2354 } 2355 2356 ret = psp_ta_load(psp, &psp->securedisplay_context.context); 2357 if (!ret) { 2358 psp->securedisplay_context.context.initialized = true; 2359 mutex_init(&psp->securedisplay_context.mutex); 2360 } else 2361 return ret; 2362 2363 mutex_lock(&psp->securedisplay_context.mutex); 2364 2365 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, 2366 TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2367 2368 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2369 2370 mutex_unlock(&psp->securedisplay_context.mutex); 2371 2372 if (ret) { 2373 psp_securedisplay_terminate(psp); 2374 /* free securedisplay shared memory */ 2375 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); 2376 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n"); 2377 return -EINVAL; 2378 } 2379 2380 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { 2381 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); 2382 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n", 2383 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret); 2384 /* don't try again */ 2385 psp->securedisplay_context.context.bin_desc.size_bytes = 0; 2386 } 2387 2388 return 0; 2389 } 2390 2391 static int psp_securedisplay_terminate(struct psp_context *psp) 2392 { 2393 int ret; 2394 2395 /* 2396 * TODO:bypass the terminate in sriov for now 2397 */ 2398 if (amdgpu_sriov_vf(psp->adev)) 2399 return 0; 2400 2401 if (!psp->securedisplay_context.context.initialized) 2402 return 0; 2403 2404 ret = psp_ta_unload(psp, &psp->securedisplay_context.context); 2405 2406 psp->securedisplay_context.context.initialized = false; 2407 2408 return ret; 2409 } 2410 2411 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2412 { 2413 int ret; 2414 2415 if (!psp->securedisplay_context.context.initialized) 2416 return -EINVAL; 2417 2418 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA && 2419 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC && 2420 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2) 2421 return -EINVAL; 2422 2423 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context); 2424 2425 return ret; 2426 } 2427 /* SECUREDISPLAY end */ 2428 2429 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev) 2430 { 2431 struct psp_context *psp = &adev->psp; 2432 int ret = 0; 2433 2434 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL) 2435 ret = psp->funcs->wait_for_bootloader(psp); 2436 2437 return ret; 2438 } 2439 2440 bool amdgpu_psp_get_ras_capability(struct psp_context *psp) 2441 { 2442 if (psp->funcs && 2443 psp->funcs->get_ras_capability) { 2444 return psp->funcs->get_ras_capability(psp); 2445 } else { 2446 return false; 2447 } 2448 } 2449 2450 bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev) 2451 { 2452 struct psp_context *psp = &adev->psp; 2453 2454 if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) 2455 return false; 2456 2457 if (psp->funcs && psp->funcs->is_reload_needed) 2458 return psp->funcs->is_reload_needed(psp); 2459 2460 return false; 2461 } 2462 2463 static void psp_update_gpu_addresses(struct amdgpu_device *adev) 2464 { 2465 struct psp_context *psp = &adev->psp; 2466 2467 
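	/*
	 * Re-derive the PSP buffer MC addresses from the framebuffer
	 * aperture. Assumption: when XGMI VF migration is enabled, the
	 * addresses cached at allocation time may no longer be valid, so
	 * they are refreshed here from psp_hw_start() before the ring and
	 * command buffers are used again.
	 */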
if (psp->cmd_buf_bo && psp->cmd_buf_mem) { 2468 psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo); 2469 psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo); 2470 psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo); 2471 } 2472 if (adev->firmware.rbuf && psp->km_ring.ring_mem) 2473 psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf); 2474 } 2475 2476 static int psp_hw_start(struct psp_context *psp) 2477 { 2478 struct amdgpu_device *adev = psp->adev; 2479 int ret; 2480 2481 if (amdgpu_virt_xgmi_migrate_enabled(adev)) 2482 psp_update_gpu_addresses(adev); 2483 2484 if (!amdgpu_sriov_vf(adev)) { 2485 if ((is_psp_fw_valid(psp->kdb)) && 2486 (psp->funcs->bootloader_load_kdb != NULL)) { 2487 ret = psp_bootloader_load_kdb(psp); 2488 if (ret) { 2489 dev_err(adev->dev, "PSP load kdb failed!\n"); 2490 return ret; 2491 } 2492 } 2493 2494 if ((is_psp_fw_valid(psp->spl)) && 2495 (psp->funcs->bootloader_load_spl != NULL)) { 2496 ret = psp_bootloader_load_spl(psp); 2497 if (ret) { 2498 dev_err(adev->dev, "PSP load spl failed!\n"); 2499 return ret; 2500 } 2501 } 2502 2503 if ((is_psp_fw_valid(psp->sys)) && 2504 (psp->funcs->bootloader_load_sysdrv != NULL)) { 2505 ret = psp_bootloader_load_sysdrv(psp); 2506 if (ret) { 2507 dev_err(adev->dev, "PSP load sys drv failed!\n"); 2508 return ret; 2509 } 2510 } 2511 2512 if ((is_psp_fw_valid(psp->soc_drv)) && 2513 (psp->funcs->bootloader_load_soc_drv != NULL)) { 2514 ret = psp_bootloader_load_soc_drv(psp); 2515 if (ret) { 2516 dev_err(adev->dev, "PSP load soc drv failed!\n"); 2517 return ret; 2518 } 2519 } 2520 2521 if ((is_psp_fw_valid(psp->intf_drv)) && 2522 (psp->funcs->bootloader_load_intf_drv != NULL)) { 2523 ret = psp_bootloader_load_intf_drv(psp); 2524 if (ret) { 2525 dev_err(adev->dev, "PSP load intf drv failed!\n"); 2526 return ret; 2527 } 2528 } 2529 2530 if ((is_psp_fw_valid(psp->dbg_drv)) && 2531 (psp->funcs->bootloader_load_dbg_drv != NULL)) { 2532 ret = psp_bootloader_load_dbg_drv(psp); 2533 if (ret) { 2534 dev_err(adev->dev, "PSP load dbg drv failed!\n"); 2535 return ret; 2536 } 2537 } 2538 2539 if ((is_psp_fw_valid(psp->ras_drv)) && 2540 (psp->funcs->bootloader_load_ras_drv != NULL)) { 2541 ret = psp_bootloader_load_ras_drv(psp); 2542 if (ret) { 2543 dev_err(adev->dev, "PSP load ras_drv failed!\n"); 2544 return ret; 2545 } 2546 } 2547 2548 if ((is_psp_fw_valid(psp->ipkeymgr_drv)) && 2549 (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) { 2550 ret = psp_bootloader_load_ipkeymgr_drv(psp); 2551 if (ret) { 2552 dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n"); 2553 return ret; 2554 } 2555 } 2556 2557 if ((is_psp_fw_valid(psp->spdm_drv)) && 2558 (psp->funcs->bootloader_load_spdm_drv != NULL)) { 2559 ret = psp_bootloader_load_spdm_drv(psp); 2560 if (ret) { 2561 dev_err(adev->dev, "PSP load spdm_drv failed!\n"); 2562 return ret; 2563 } 2564 } 2565 2566 if ((is_psp_fw_valid(psp->sos)) && 2567 (psp->funcs->bootloader_load_sos != NULL)) { 2568 ret = psp_bootloader_load_sos(psp); 2569 if (ret) { 2570 dev_err(adev->dev, "PSP load sos failed!\n"); 2571 return ret; 2572 } 2573 } 2574 } 2575 2576 ret = psp_ring_create(psp, PSP_RING_TYPE__KM); 2577 if (ret) { 2578 dev_err(adev->dev, "PSP create ring failed!\n"); 2579 return ret; 2580 } 2581 2582 if (!amdgpu_in_reset(adev) && !adev->in_suspend) { 2583 ret = psp_update_fw_reservation(psp); 2584 if (ret) { 2585 dev_err(adev->dev, "update fw reservation failed!\n"); 2586 return ret; 2587 } 2588 } 2589 2590 if (amdgpu_sriov_vf(adev) && 
amdgpu_in_reset(adev)) 2591 goto skip_pin_bo; 2592 2593 if (!psp->boot_time_tmr || psp->autoload_supported) { 2594 ret = psp_tmr_init(psp); 2595 if (ret) { 2596 dev_err(adev->dev, "PSP tmr init failed!\n"); 2597 return ret; 2598 } 2599 } 2600 2601 skip_pin_bo: 2602 /* 2603 * For ASICs with DF Cstate management centralized 2604 * to PMFW, TMR setup should be performed after PMFW 2605 * loaded and before other non-psp firmware loaded. 2606 */ 2607 if (psp->pmfw_centralized_cstate_management) { 2608 ret = psp_load_smu_fw(psp); 2609 if (ret) 2610 return ret; 2611 } 2612 2613 if (!psp->boot_time_tmr || !psp->autoload_supported) { 2614 ret = psp_tmr_load(psp); 2615 if (ret) { 2616 dev_err(adev->dev, "PSP load tmr failed!\n"); 2617 return ret; 2618 } 2619 } 2620 2621 return 0; 2622 } 2623 2624 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, 2625 enum psp_gfx_fw_type *type) 2626 { 2627 switch (ucode->ucode_id) { 2628 case AMDGPU_UCODE_ID_CAP: 2629 *type = GFX_FW_TYPE_CAP; 2630 break; 2631 case AMDGPU_UCODE_ID_SDMA0: 2632 *type = GFX_FW_TYPE_SDMA0; 2633 break; 2634 case AMDGPU_UCODE_ID_SDMA1: 2635 *type = GFX_FW_TYPE_SDMA1; 2636 break; 2637 case AMDGPU_UCODE_ID_SDMA2: 2638 *type = GFX_FW_TYPE_SDMA2; 2639 break; 2640 case AMDGPU_UCODE_ID_SDMA3: 2641 *type = GFX_FW_TYPE_SDMA3; 2642 break; 2643 case AMDGPU_UCODE_ID_SDMA4: 2644 *type = GFX_FW_TYPE_SDMA4; 2645 break; 2646 case AMDGPU_UCODE_ID_SDMA5: 2647 *type = GFX_FW_TYPE_SDMA5; 2648 break; 2649 case AMDGPU_UCODE_ID_SDMA6: 2650 *type = GFX_FW_TYPE_SDMA6; 2651 break; 2652 case AMDGPU_UCODE_ID_SDMA7: 2653 *type = GFX_FW_TYPE_SDMA7; 2654 break; 2655 case AMDGPU_UCODE_ID_CP_MES: 2656 *type = GFX_FW_TYPE_CP_MES; 2657 break; 2658 case AMDGPU_UCODE_ID_CP_MES_DATA: 2659 *type = GFX_FW_TYPE_MES_STACK; 2660 break; 2661 case AMDGPU_UCODE_ID_CP_MES1: 2662 *type = GFX_FW_TYPE_CP_MES_KIQ; 2663 break; 2664 case AMDGPU_UCODE_ID_CP_MES1_DATA: 2665 *type = GFX_FW_TYPE_MES_KIQ_STACK; 2666 break; 2667 case AMDGPU_UCODE_ID_CP_CE: 2668 *type = GFX_FW_TYPE_CP_CE; 2669 break; 2670 case AMDGPU_UCODE_ID_CP_PFP: 2671 *type = GFX_FW_TYPE_CP_PFP; 2672 break; 2673 case AMDGPU_UCODE_ID_CP_ME: 2674 *type = GFX_FW_TYPE_CP_ME; 2675 break; 2676 case AMDGPU_UCODE_ID_CP_MEC1: 2677 *type = GFX_FW_TYPE_CP_MEC; 2678 break; 2679 case AMDGPU_UCODE_ID_CP_MEC1_JT: 2680 *type = GFX_FW_TYPE_CP_MEC_ME1; 2681 break; 2682 case AMDGPU_UCODE_ID_CP_MEC2: 2683 *type = GFX_FW_TYPE_CP_MEC; 2684 break; 2685 case AMDGPU_UCODE_ID_CP_MEC2_JT: 2686 *type = GFX_FW_TYPE_CP_MEC_ME2; 2687 break; 2688 case AMDGPU_UCODE_ID_RLC_P: 2689 *type = GFX_FW_TYPE_RLC_P; 2690 break; 2691 case AMDGPU_UCODE_ID_RLC_V: 2692 *type = GFX_FW_TYPE_RLC_V; 2693 break; 2694 case AMDGPU_UCODE_ID_RLC_G: 2695 *type = GFX_FW_TYPE_RLC_G; 2696 break; 2697 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: 2698 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL; 2699 break; 2700 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: 2701 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM; 2702 break; 2703 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: 2704 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; 2705 break; 2706 case AMDGPU_UCODE_ID_RLC_IRAM: 2707 *type = GFX_FW_TYPE_RLC_IRAM; 2708 break; 2709 case AMDGPU_UCODE_ID_RLC_DRAM: 2710 *type = GFX_FW_TYPE_RLC_DRAM_BOOT; 2711 break; 2712 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS: 2713 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS; 2714 break; 2715 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS: 2716 *type = GFX_FW_TYPE_SE0_TAP_DELAYS; 2717 break; 2718 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS: 2719 *type = GFX_FW_TYPE_SE1_TAP_DELAYS; 
2720 break; 2721 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS: 2722 *type = GFX_FW_TYPE_SE2_TAP_DELAYS; 2723 break; 2724 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS: 2725 *type = GFX_FW_TYPE_SE3_TAP_DELAYS; 2726 break; 2727 case AMDGPU_UCODE_ID_SMC: 2728 *type = GFX_FW_TYPE_SMU; 2729 break; 2730 case AMDGPU_UCODE_ID_PPTABLE: 2731 *type = GFX_FW_TYPE_PPTABLE; 2732 break; 2733 case AMDGPU_UCODE_ID_UVD: 2734 *type = GFX_FW_TYPE_UVD; 2735 break; 2736 case AMDGPU_UCODE_ID_UVD1: 2737 *type = GFX_FW_TYPE_UVD1; 2738 break; 2739 case AMDGPU_UCODE_ID_VCE: 2740 *type = GFX_FW_TYPE_VCE; 2741 break; 2742 case AMDGPU_UCODE_ID_VCN: 2743 *type = GFX_FW_TYPE_VCN; 2744 break; 2745 case AMDGPU_UCODE_ID_VCN1: 2746 *type = GFX_FW_TYPE_VCN1; 2747 break; 2748 case AMDGPU_UCODE_ID_DMCU_ERAM: 2749 *type = GFX_FW_TYPE_DMCU_ERAM; 2750 break; 2751 case AMDGPU_UCODE_ID_DMCU_INTV: 2752 *type = GFX_FW_TYPE_DMCU_ISR; 2753 break; 2754 case AMDGPU_UCODE_ID_VCN0_RAM: 2755 *type = GFX_FW_TYPE_VCN0_RAM; 2756 break; 2757 case AMDGPU_UCODE_ID_VCN1_RAM: 2758 *type = GFX_FW_TYPE_VCN1_RAM; 2759 break; 2760 case AMDGPU_UCODE_ID_DMCUB: 2761 *type = GFX_FW_TYPE_DMUB; 2762 break; 2763 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0: 2764 case AMDGPU_UCODE_ID_SDMA_RS64: 2765 *type = GFX_FW_TYPE_SDMA_UCODE_TH0; 2766 break; 2767 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1: 2768 *type = GFX_FW_TYPE_SDMA_UCODE_TH1; 2769 break; 2770 case AMDGPU_UCODE_ID_IMU_I: 2771 *type = GFX_FW_TYPE_IMU_I; 2772 break; 2773 case AMDGPU_UCODE_ID_IMU_D: 2774 *type = GFX_FW_TYPE_IMU_D; 2775 break; 2776 case AMDGPU_UCODE_ID_CP_RS64_PFP: 2777 *type = GFX_FW_TYPE_RS64_PFP; 2778 break; 2779 case AMDGPU_UCODE_ID_CP_RS64_ME: 2780 *type = GFX_FW_TYPE_RS64_ME; 2781 break; 2782 case AMDGPU_UCODE_ID_CP_RS64_MEC: 2783 *type = GFX_FW_TYPE_RS64_MEC; 2784 break; 2785 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK: 2786 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK; 2787 break; 2788 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK: 2789 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK; 2790 break; 2791 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK: 2792 *type = GFX_FW_TYPE_RS64_ME_P0_STACK; 2793 break; 2794 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK: 2795 *type = GFX_FW_TYPE_RS64_ME_P1_STACK; 2796 break; 2797 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK: 2798 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK; 2799 break; 2800 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK: 2801 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK; 2802 break; 2803 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK: 2804 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK; 2805 break; 2806 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK: 2807 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK; 2808 break; 2809 case AMDGPU_UCODE_ID_VPE_CTX: 2810 *type = GFX_FW_TYPE_VPEC_FW1; 2811 break; 2812 case AMDGPU_UCODE_ID_VPE_CTL: 2813 *type = GFX_FW_TYPE_VPEC_FW2; 2814 break; 2815 case AMDGPU_UCODE_ID_VPE: 2816 *type = GFX_FW_TYPE_VPE; 2817 break; 2818 case AMDGPU_UCODE_ID_UMSCH_MM_UCODE: 2819 *type = GFX_FW_TYPE_UMSCH_UCODE; 2820 break; 2821 case AMDGPU_UCODE_ID_UMSCH_MM_DATA: 2822 *type = GFX_FW_TYPE_UMSCH_DATA; 2823 break; 2824 case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER: 2825 *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER; 2826 break; 2827 case AMDGPU_UCODE_ID_P2S_TABLE: 2828 *type = GFX_FW_TYPE_P2S_TABLE; 2829 break; 2830 case AMDGPU_UCODE_ID_JPEG_RAM: 2831 *type = GFX_FW_TYPE_JPEG_RAM; 2832 break; 2833 case AMDGPU_UCODE_ID_ISP: 2834 *type = GFX_FW_TYPE_ISP; 2835 break; 2836 case AMDGPU_UCODE_ID_MAXIMUM: 2837 default: 2838 return -EINVAL; 2839 } 2840 2841 return 0; 2842 } 2843 2844 static void psp_print_fw_hdr(struct psp_context *psp, 2845 struct 
amdgpu_firmware_info *ucode) 2846 { 2847 struct amdgpu_device *adev = psp->adev; 2848 struct common_firmware_header *hdr; 2849 2850 switch (ucode->ucode_id) { 2851 case AMDGPU_UCODE_ID_SDMA0: 2852 case AMDGPU_UCODE_ID_SDMA1: 2853 case AMDGPU_UCODE_ID_SDMA2: 2854 case AMDGPU_UCODE_ID_SDMA3: 2855 case AMDGPU_UCODE_ID_SDMA4: 2856 case AMDGPU_UCODE_ID_SDMA5: 2857 case AMDGPU_UCODE_ID_SDMA6: 2858 case AMDGPU_UCODE_ID_SDMA7: 2859 hdr = (struct common_firmware_header *) 2860 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; 2861 amdgpu_ucode_print_sdma_hdr(hdr); 2862 break; 2863 case AMDGPU_UCODE_ID_CP_CE: 2864 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data; 2865 amdgpu_ucode_print_gfx_hdr(hdr); 2866 break; 2867 case AMDGPU_UCODE_ID_CP_PFP: 2868 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data; 2869 amdgpu_ucode_print_gfx_hdr(hdr); 2870 break; 2871 case AMDGPU_UCODE_ID_CP_ME: 2872 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data; 2873 amdgpu_ucode_print_gfx_hdr(hdr); 2874 break; 2875 case AMDGPU_UCODE_ID_CP_MEC1: 2876 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data; 2877 amdgpu_ucode_print_gfx_hdr(hdr); 2878 break; 2879 case AMDGPU_UCODE_ID_RLC_G: 2880 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data; 2881 amdgpu_ucode_print_rlc_hdr(hdr); 2882 break; 2883 case AMDGPU_UCODE_ID_SMC: 2884 hdr = (struct common_firmware_header *)adev->pm.fw->data; 2885 amdgpu_ucode_print_smc_hdr(hdr); 2886 break; 2887 default: 2888 break; 2889 } 2890 } 2891 2892 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp, 2893 struct amdgpu_firmware_info *ucode, 2894 struct psp_gfx_cmd_resp *cmd) 2895 { 2896 int ret; 2897 uint64_t fw_mem_mc_addr = ucode->mc_addr; 2898 2899 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 2900 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); 2901 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); 2902 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; 2903 2904 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); 2905 if (ret) 2906 dev_err(psp->adev->dev, "Unknown firmware type\n"); 2907 2908 return ret; 2909 } 2910 2911 int psp_execute_ip_fw_load(struct psp_context *psp, 2912 struct amdgpu_firmware_info *ucode) 2913 { 2914 int ret = 0; 2915 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 2916 2917 ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd); 2918 if (!ret) { 2919 ret = psp_cmd_submit_buf(psp, ucode, cmd, 2920 psp->fence_buf_mc_addr); 2921 } 2922 2923 release_psp_cmd_buf(psp); 2924 2925 return ret; 2926 } 2927 2928 static int psp_load_p2s_table(struct psp_context *psp) 2929 { 2930 int ret; 2931 struct amdgpu_device *adev = psp->adev; 2932 struct amdgpu_firmware_info *ucode = 2933 &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE]; 2934 2935 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || 2936 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO))) 2937 return 0; 2938 2939 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || 2940 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) { 2941 uint32_t supp_vers = adev->flags & AMD_IS_APU ? 
0x0036013D : 2942 0x0036003C; 2943 if (psp->sos.fw_version < supp_vers) 2944 return 0; 2945 } 2946 2947 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2948 return 0; 2949 2950 ret = psp_execute_ip_fw_load(psp, ucode); 2951 2952 return ret; 2953 } 2954 2955 static int psp_load_smu_fw(struct psp_context *psp) 2956 { 2957 int ret; 2958 struct amdgpu_device *adev = psp->adev; 2959 struct amdgpu_firmware_info *ucode = 2960 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 2961 struct amdgpu_ras *ras = psp->ras_context.ras; 2962 2963 /* 2964 * Skip SMU FW reloading in case of using BACO for runpm only, 2965 * as SMU is always alive. 2966 */ 2967 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || 2968 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO))) 2969 return 0; 2970 2971 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2972 return 0; 2973 2974 if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled && 2975 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) || 2976 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) { 2977 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); 2978 if (ret) 2979 dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n"); 2980 } 2981 2982 ret = psp_execute_ip_fw_load(psp, ucode); 2983 2984 if (ret) 2985 dev_err(adev->dev, "PSP load smu failed!\n"); 2986 2987 return ret; 2988 } 2989 2990 static bool fw_load_skip_check(struct psp_context *psp, 2991 struct amdgpu_firmware_info *ucode) 2992 { 2993 if (!ucode->fw || !ucode->ucode_size) 2994 return true; 2995 2996 if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE) 2997 return true; 2998 2999 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 3000 (psp_smu_reload_quirk(psp) || 3001 psp->autoload_supported || 3002 psp->pmfw_centralized_cstate_management)) 3003 return true; 3004 3005 if (amdgpu_sriov_vf(psp->adev) && 3006 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id)) 3007 return true; 3008 3009 if (psp->autoload_supported && 3010 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || 3011 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) 3012 /* skip mec JT when autoload is enabled */ 3013 return true; 3014 3015 return false; 3016 } 3017 3018 int psp_load_fw_list(struct psp_context *psp, 3019 struct amdgpu_firmware_info **ucode_list, int ucode_count) 3020 { 3021 int ret = 0, i; 3022 struct amdgpu_firmware_info *ucode; 3023 3024 for (i = 0; i < ucode_count; ++i) { 3025 ucode = ucode_list[i]; 3026 psp_print_fw_hdr(psp, ucode); 3027 ret = psp_execute_ip_fw_load(psp, ucode); 3028 if (ret) 3029 return ret; 3030 } 3031 return ret; 3032 } 3033 3034 static int psp_load_non_psp_fw(struct psp_context *psp) 3035 { 3036 int i, ret; 3037 struct amdgpu_firmware_info *ucode; 3038 struct amdgpu_device *adev = psp->adev; 3039 3040 if (psp->autoload_supported && 3041 !psp->pmfw_centralized_cstate_management) { 3042 ret = psp_load_smu_fw(psp); 3043 if (ret) 3044 return ret; 3045 } 3046 3047 /* Load P2S table first if it's available */ 3048 psp_load_p2s_table(psp); 3049 3050 for (i = 0; i < adev->firmware.max_ucodes; i++) { 3051 ucode = &adev->firmware.ucode[i]; 3052 3053 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 3054 !fw_load_skip_check(psp, ucode)) { 3055 ret = psp_load_smu_fw(psp); 3056 if (ret) 3057 return ret; 3058 continue; 3059 } 3060 3061 if (fw_load_skip_check(psp, ucode)) 3062 continue; 3063 3064 if (psp->autoload_supported && 3065 (amdgpu_ip_version(adev, MP0_HWIP, 0) == 3066 IP_VERSION(11, 0, 7) || 3067 amdgpu_ip_version(adev, MP0_HWIP, 0) == 3068 IP_VERSION(11, 0, 11) || 3069 
amdgpu_ip_version(adev, MP0_HWIP, 0) == 3070 IP_VERSION(11, 0, 12)) && 3071 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 || 3072 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 || 3073 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3)) 3074 /* PSP only receives one SDMA fw for sienna_cichlid, 3075 * as all four sdma fw are the same 3076 */ 3077 continue; 3078 3079 psp_print_fw_hdr(psp, ucode); 3080 3081 ret = psp_execute_ip_fw_load(psp, ucode); 3082 if (ret) 3083 return ret; 3084 3085 /* Start rlc autoload after psp has received all the gfx firmware */ 3086 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ? 3087 adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) { 3088 ret = psp_rlc_autoload_start(psp); 3089 if (ret) { 3090 dev_err(adev->dev, "Failed to start rlc autoload\n"); 3091 return ret; 3092 } 3093 } 3094 } 3095 3096 return 0; 3097 } 3098 3099 static int psp_load_fw(struct amdgpu_device *adev) 3100 { 3101 int ret; 3102 struct psp_context *psp = &adev->psp; 3103 3104 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { 3105 /* should not destroy ring, only stop */ 3106 psp_ring_stop(psp, PSP_RING_TYPE__KM); 3107 } else { 3108 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE); 3109 3110 ret = psp_ring_init(psp, PSP_RING_TYPE__KM); 3111 if (ret) { 3112 dev_err(adev->dev, "PSP ring init failed!\n"); 3113 goto failed; 3114 } 3115 } 3116 3117 ret = psp_hw_start(psp); 3118 if (ret) 3119 goto failed; 3120 3121 ret = psp_load_non_psp_fw(psp); 3122 if (ret) 3123 goto failed1; 3124 3125 ret = psp_asd_initialize(psp); 3126 if (ret) { 3127 dev_err(adev->dev, "PSP load asd failed!\n"); 3128 goto failed1; 3129 } 3130 3131 ret = psp_rl_load(adev); 3132 if (ret) { 3133 dev_err(adev->dev, "PSP load RL failed!\n"); 3134 goto failed1; 3135 } 3136 3137 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { 3138 if (adev->gmc.xgmi.num_physical_nodes > 1) { 3139 ret = psp_xgmi_initialize(psp, false, true); 3140 /* Warn on XGMI session initialization failure 3141 * instead of stopping driver initialization 3142 */ 3143 if (ret) 3144 dev_err(psp->adev->dev, 3145 "XGMI: Failed to initialize XGMI session\n"); 3146 } 3147 } 3148 3149 if (psp->ta_fw) { 3150 ret = psp_ras_initialize(psp); 3151 if (ret) 3152 dev_err(psp->adev->dev, 3153 "RAS: Failed to initialize RAS\n"); 3154 3155 ret = psp_hdcp_initialize(psp); 3156 if (ret) 3157 dev_err(psp->adev->dev, 3158 "HDCP: Failed to initialize HDCP\n"); 3159 3160 ret = psp_dtm_initialize(psp); 3161 if (ret) 3162 dev_err(psp->adev->dev, 3163 "DTM: Failed to initialize DTM\n"); 3164 3165 ret = psp_rap_initialize(psp); 3166 if (ret) 3167 dev_err(psp->adev->dev, 3168 "RAP: Failed to initialize RAP\n"); 3169 3170 ret = psp_securedisplay_initialize(psp); 3171 if (ret) 3172 dev_err(psp->adev->dev, 3173 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 3174 } 3175 3176 return 0; 3177 3178 failed1: 3179 psp_free_shared_bufs(psp); 3180 failed: 3181 /* 3182 * all cleanup jobs (xgmi terminate, ras terminate, 3183 * ring destroy, cmd/fence/fw buffers destroy, 3184 * psp->cmd destroy) are delayed to psp_hw_fini 3185 */ 3186 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 3187 return ret; 3188 } 3189 3190 static int psp_hw_init(struct amdgpu_ip_block *ip_block) 3191 { 3192 int ret; 3193 struct amdgpu_device *adev = ip_block->adev; 3194 3195 mutex_lock(&adev->firmware.mutex); 3196 3197 ret = amdgpu_ucode_init_bo(adev); 3198 if (ret) 3199 goto failed; 3200 3201 ret = psp_load_fw(adev); 3202 if (ret) { 3203 dev_err(adev->dev, "PSP firmware loading failed\n"); 3204 goto failed; 3205 } 3206
3207 mutex_unlock(&adev->firmware.mutex); 3208 return 0; 3209 3210 failed: 3211 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT; 3212 mutex_unlock(&adev->firmware.mutex); 3213 return -EINVAL; 3214 } 3215 3216 static int psp_hw_fini(struct amdgpu_ip_block *ip_block) 3217 { 3218 struct amdgpu_device *adev = ip_block->adev; 3219 struct psp_context *psp = &adev->psp; 3220 3221 if (psp->ta_fw) { 3222 psp_ras_terminate(psp); 3223 psp_securedisplay_terminate(psp); 3224 psp_rap_terminate(psp); 3225 psp_dtm_terminate(psp); 3226 psp_hdcp_terminate(psp); 3227 3228 if (adev->gmc.xgmi.num_physical_nodes > 1) 3229 psp_xgmi_terminate(psp); 3230 } 3231 3232 psp_asd_terminate(psp); 3233 psp_tmr_terminate(psp); 3234 3235 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 3236 3237 return 0; 3238 } 3239 3240 static int psp_suspend(struct amdgpu_ip_block *ip_block) 3241 { 3242 int ret = 0; 3243 struct amdgpu_device *adev = ip_block->adev; 3244 struct psp_context *psp = &adev->psp; 3245 3246 if (adev->gmc.xgmi.num_physical_nodes > 1 && 3247 psp->xgmi_context.context.initialized) { 3248 ret = psp_xgmi_terminate(psp); 3249 if (ret) { 3250 dev_err(adev->dev, "Failed to terminate xgmi ta\n"); 3251 goto out; 3252 } 3253 } 3254 3255 if (psp->ta_fw) { 3256 ret = psp_ras_terminate(psp); 3257 if (ret) { 3258 dev_err(adev->dev, "Failed to terminate ras ta\n"); 3259 goto out; 3260 } 3261 ret = psp_hdcp_terminate(psp); 3262 if (ret) { 3263 dev_err(adev->dev, "Failed to terminate hdcp ta\n"); 3264 goto out; 3265 } 3266 ret = psp_dtm_terminate(psp); 3267 if (ret) { 3268 dev_err(adev->dev, "Failed to terminate dtm ta\n"); 3269 goto out; 3270 } 3271 ret = psp_rap_terminate(psp); 3272 if (ret) { 3273 dev_err(adev->dev, "Failed to terminate rap ta\n"); 3274 goto out; 3275 } 3276 ret = psp_securedisplay_terminate(psp); 3277 if (ret) { 3278 dev_err(adev->dev, "Failed to terminate securedisplay ta\n"); 3279 goto out; 3280 } 3281 } 3282 3283 ret = psp_asd_terminate(psp); 3284 if (ret) { 3285 dev_err(adev->dev, "Failed to terminate asd\n"); 3286 goto out; 3287 } 3288 3289 ret = psp_tmr_terminate(psp); 3290 if (ret) { 3291 dev_err(adev->dev, "Failed to terminate tmr\n"); 3292 goto out; 3293 } 3294 3295 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); 3296 if (ret) 3297 dev_err(adev->dev, "PSP ring stop failed\n"); 3298 3299 out: 3300 return ret; 3301 } 3302 3303 static int psp_resume(struct amdgpu_ip_block *ip_block) 3304 { 3305 int ret; 3306 struct amdgpu_device *adev = ip_block->adev; 3307 struct psp_context *psp = &adev->psp; 3308 3309 dev_info(adev->dev, "PSP is resuming...\n"); 3310 3311 if (psp->mem_train_ctx.enable_mem_training) { 3312 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); 3313 if (ret) { 3314 dev_err(adev->dev, "Failed to process memory training!\n"); 3315 return ret; 3316 } 3317 } 3318 3319 mutex_lock(&adev->firmware.mutex); 3320 3321 ret = amdgpu_ucode_init_bo(adev); 3322 if (ret) 3323 goto failed; 3324 3325 ret = psp_hw_start(psp); 3326 if (ret) 3327 goto failed; 3328 3329 ret = psp_load_non_psp_fw(psp); 3330 if (ret) 3331 goto failed; 3332 3333 ret = psp_asd_initialize(psp); 3334 if (ret) { 3335 dev_err(adev->dev, "PSP load asd failed!\n"); 3336 goto failed; 3337 } 3338 3339 ret = psp_rl_load(adev); 3340 if (ret) { 3341 dev_err(adev->dev, "PSP load RL failed!\n"); 3342 goto failed; 3343 } 3344 3345 if (adev->gmc.xgmi.num_physical_nodes > 1) { 3346 ret = psp_xgmi_initialize(psp, false, true); 3347 /* Warn on XGMI session initialization failure 3348 * instead of stopping driver initialization 3349 */ 3350 if (ret) 3351
dev_err(psp->adev->dev, 3352 "XGMI: Failed to initialize XGMI session\n"); 3353 } 3354 3355 if (psp->ta_fw) { 3356 ret = psp_ras_initialize(psp); 3357 if (ret) 3358 dev_err(psp->adev->dev, 3359 "RAS: Failed to initialize RAS\n"); 3360 3361 ret = psp_hdcp_initialize(psp); 3362 if (ret) 3363 dev_err(psp->adev->dev, 3364 "HDCP: Failed to initialize HDCP\n"); 3365 3366 ret = psp_dtm_initialize(psp); 3367 if (ret) 3368 dev_err(psp->adev->dev, 3369 "DTM: Failed to initialize DTM\n"); 3370 3371 ret = psp_rap_initialize(psp); 3372 if (ret) 3373 dev_err(psp->adev->dev, 3374 "RAP: Failed to initialize RAP\n"); 3375 3376 ret = psp_securedisplay_initialize(psp); 3377 if (ret) 3378 dev_err(psp->adev->dev, 3379 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 3380 } 3381 3382 mutex_unlock(&adev->firmware.mutex); 3383 3384 return 0; 3385 3386 failed: 3387 dev_err(adev->dev, "PSP resume failed\n"); 3388 mutex_unlock(&adev->firmware.mutex); 3389 return ret; 3390 } 3391 3392 int psp_gpu_reset(struct amdgpu_device *adev) 3393 { 3394 int ret; 3395 3396 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 3397 return 0; 3398 3399 mutex_lock(&adev->psp.mutex); 3400 ret = psp_mode1_reset(&adev->psp); 3401 mutex_unlock(&adev->psp.mutex); 3402 3403 return ret; 3404 } 3405 3406 int psp_rlc_autoload_start(struct psp_context *psp) 3407 { 3408 int ret; 3409 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 3410 3411 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC; 3412 3413 ret = psp_cmd_submit_buf(psp, NULL, cmd, 3414 psp->fence_buf_mc_addr); 3415 3416 release_psp_cmd_buf(psp); 3417 3418 return ret; 3419 } 3420 3421 int psp_ring_cmd_submit(struct psp_context *psp, 3422 uint64_t cmd_buf_mc_addr, 3423 uint64_t fence_mc_addr, 3424 int index) 3425 { 3426 unsigned int psp_write_ptr_reg = 0; 3427 struct psp_gfx_rb_frame *write_frame; 3428 struct psp_ring *ring = &psp->km_ring; 3429 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; 3430 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + 3431 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; 3432 struct amdgpu_device *adev = psp->adev; 3433 uint32_t ring_size_dw = ring->ring_size / 4; 3434 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; 3435 3436 /* KM (GPCOM) prepare write pointer */ 3437 psp_write_ptr_reg = psp_ring_get_wptr(psp); 3438 3439 /* Update KM RB frame pointer to new frame */ 3440 /* write_frame ptr increments by size of rb_frame in bytes */ 3441 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ 3442 if ((psp_write_ptr_reg % ring_size_dw) == 0) 3443 write_frame = ring_buffer_start; 3444 else 3445 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); 3446 /* Check invalid write_frame ptr address */ 3447 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { 3448 dev_err(adev->dev, 3449 "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", 3450 ring_buffer_start, ring_buffer_end, write_frame); 3451 dev_err(adev->dev, 3452 "write_frame is pointing to address out of bounds\n"); 3453 return -EINVAL; 3454 } 3455 3456 /* Initialize KM RB frame */ 3457 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); 3458 3459 /* Update KM RB frame */ 3460 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); 3461 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); 3462 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); 3463 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); 3464 write_frame->fence_value = index; 3465 
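	/*
	 * Flush the HDP write cache so the PSP sees the fully written RB
	 * frame before the write pointer update below publishes it.
	 */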
amdgpu_device_flush_hdp(adev, NULL); 3466 3467 /* Update the write Pointer in DWORDs */ 3468 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; 3469 psp_ring_set_wptr(psp, psp_write_ptr_reg); 3470 return 0; 3471 } 3472 3473 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name) 3474 { 3475 struct amdgpu_device *adev = psp->adev; 3476 const struct psp_firmware_header_v1_0 *asd_hdr; 3477 int err = 0; 3478 3479 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED, 3480 "amdgpu/%s_asd.bin", chip_name); 3481 if (err) 3482 goto out; 3483 3484 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; 3485 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version); 3486 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version); 3487 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes); 3488 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr + 3489 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); 3490 return 0; 3491 out: 3492 amdgpu_ucode_release(&adev->psp.asd_fw); 3493 return err; 3494 } 3495 3496 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name) 3497 { 3498 struct amdgpu_device *adev = psp->adev; 3499 const struct psp_firmware_header_v1_0 *toc_hdr; 3500 int err = 0; 3501 3502 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED, 3503 "amdgpu/%s_toc.bin", chip_name); 3504 if (err) 3505 goto out; 3506 3507 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; 3508 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); 3509 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); 3510 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); 3511 adev->psp.toc.start_addr = (uint8_t *)toc_hdr + 3512 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); 3513 return 0; 3514 out: 3515 amdgpu_ucode_release(&adev->psp.toc_fw); 3516 return err; 3517 } 3518 3519 static int parse_sos_bin_descriptor(struct psp_context *psp, 3520 const struct psp_fw_bin_desc *desc, 3521 const struct psp_firmware_header_v2_0 *sos_hdr) 3522 { 3523 uint8_t *ucode_start_addr = NULL; 3524 3525 if (!psp || !desc || !sos_hdr) 3526 return -EINVAL; 3527 3528 ucode_start_addr = (uint8_t *)sos_hdr + 3529 le32_to_cpu(desc->offset_bytes) + 3530 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3531 3532 switch (desc->fw_type) { 3533 case PSP_FW_TYPE_PSP_SOS: 3534 psp->sos.fw_version = le32_to_cpu(desc->fw_version); 3535 psp->sos.feature_version = le32_to_cpu(desc->fw_version); 3536 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes); 3537 psp->sos.start_addr = ucode_start_addr; 3538 break; 3539 case PSP_FW_TYPE_PSP_SYS_DRV: 3540 psp->sys.fw_version = le32_to_cpu(desc->fw_version); 3541 psp->sys.feature_version = le32_to_cpu(desc->fw_version); 3542 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes); 3543 psp->sys.start_addr = ucode_start_addr; 3544 break; 3545 case PSP_FW_TYPE_PSP_KDB: 3546 psp->kdb.fw_version = le32_to_cpu(desc->fw_version); 3547 psp->kdb.feature_version = le32_to_cpu(desc->fw_version); 3548 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes); 3549 psp->kdb.start_addr = ucode_start_addr; 3550 break; 3551 case PSP_FW_TYPE_PSP_TOC: 3552 psp->toc.fw_version = le32_to_cpu(desc->fw_version); 3553 psp->toc.feature_version = le32_to_cpu(desc->fw_version); 3554 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes); 3555 
psp->toc.start_addr = ucode_start_addr; 3556 break; 3557 case PSP_FW_TYPE_PSP_SPL: 3558 psp->spl.fw_version = le32_to_cpu(desc->fw_version); 3559 psp->spl.feature_version = le32_to_cpu(desc->fw_version); 3560 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes); 3561 psp->spl.start_addr = ucode_start_addr; 3562 break; 3563 case PSP_FW_TYPE_PSP_RL: 3564 psp->rl.fw_version = le32_to_cpu(desc->fw_version); 3565 psp->rl.feature_version = le32_to_cpu(desc->fw_version); 3566 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes); 3567 psp->rl.start_addr = ucode_start_addr; 3568 break; 3569 case PSP_FW_TYPE_PSP_SOC_DRV: 3570 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version); 3571 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version); 3572 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3573 psp->soc_drv.start_addr = ucode_start_addr; 3574 break; 3575 case PSP_FW_TYPE_PSP_INTF_DRV: 3576 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version); 3577 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version); 3578 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3579 psp->intf_drv.start_addr = ucode_start_addr; 3580 break; 3581 case PSP_FW_TYPE_PSP_DBG_DRV: 3582 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version); 3583 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version); 3584 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3585 psp->dbg_drv.start_addr = ucode_start_addr; 3586 break; 3587 case PSP_FW_TYPE_PSP_RAS_DRV: 3588 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version); 3589 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version); 3590 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3591 psp->ras_drv.start_addr = ucode_start_addr; 3592 break; 3593 case PSP_FW_TYPE_PSP_IPKEYMGR_DRV: 3594 psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version); 3595 psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version); 3596 psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3597 psp->ipkeymgr_drv.start_addr = ucode_start_addr; 3598 break; 3599 case PSP_FW_TYPE_PSP_SPDM_DRV: 3600 psp->spdm_drv.fw_version = le32_to_cpu(desc->fw_version); 3601 psp->spdm_drv.feature_version = le32_to_cpu(desc->fw_version); 3602 psp->spdm_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3603 psp->spdm_drv.start_addr = ucode_start_addr; 3604 break; 3605 default: 3606 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type); 3607 break; 3608 } 3609 3610 return 0; 3611 } 3612 3613 static int psp_init_sos_base_fw(struct amdgpu_device *adev) 3614 { 3615 const struct psp_firmware_header_v1_0 *sos_hdr; 3616 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3617 uint8_t *ucode_array_start_addr; 3618 3619 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3620 ucode_array_start_addr = (uint8_t *)sos_hdr + 3621 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3622 3623 if (adev->gmc.xgmi.connected_to_cpu || 3624 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) { 3625 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version); 3626 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version); 3627 3628 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes); 3629 adev->psp.sys.start_addr = ucode_array_start_addr; 3630 3631 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes); 3632 adev->psp.sos.start_addr = ucode_array_start_addr + 3633 le32_to_cpu(sos_hdr->sos.offset_bytes); 3634 } else { 3635 /* Load alternate PSP SOS FW */ 3636 sos_hdr_v1_3 = 
(const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3637 3638 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3639 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3640 3641 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes); 3642 adev->psp.sys.start_addr = ucode_array_start_addr + 3643 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes); 3644 3645 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes); 3646 adev->psp.sos.start_addr = ucode_array_start_addr + 3647 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes); 3648 } 3649 3650 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) { 3651 dev_warn(adev->dev, "PSP SOS FW not available"); 3652 return -EINVAL; 3653 } 3654 3655 return 0; 3656 } 3657 3658 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) 3659 { 3660 struct amdgpu_device *adev = psp->adev; 3661 const struct psp_firmware_header_v1_0 *sos_hdr; 3662 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1; 3663 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; 3664 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3665 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0; 3666 const struct psp_firmware_header_v2_1 *sos_hdr_v2_1; 3667 int fw_index, fw_bin_count, start_index = 0; 3668 const struct psp_fw_bin_desc *fw_bin; 3669 uint8_t *ucode_array_start_addr; 3670 int err = 0; 3671 3672 if (amdgpu_is_kicker_fw(adev)) 3673 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED, 3674 "amdgpu/%s_sos_kicker.bin", chip_name); 3675 else 3676 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED, 3677 "amdgpu/%s_sos.bin", chip_name); 3678 if (err) 3679 goto out; 3680 3681 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3682 ucode_array_start_addr = (uint8_t *)sos_hdr + 3683 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3684 amdgpu_ucode_print_psp_hdr(&sos_hdr->header); 3685 3686 switch (sos_hdr->header.header_version_major) { 3687 case 1: 3688 err = psp_init_sos_base_fw(adev); 3689 if (err) 3690 goto out; 3691 3692 if (sos_hdr->header.header_version_minor == 1) { 3693 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; 3694 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes); 3695 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3696 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes); 3697 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes); 3698 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3699 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes); 3700 } 3701 if (sos_hdr->header.header_version_minor == 2) { 3702 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; 3703 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes); 3704 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3705 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes); 3706 } 3707 if (sos_hdr->header.header_version_minor == 3) { 3708 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3709 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes); 3710 adev->psp.toc.start_addr = ucode_array_start_addr + 3711 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes); 3712 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes); 3713 adev->psp.kdb.start_addr = ucode_array_start_addr + 3714 
le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes); 3715 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes); 3716 adev->psp.spl.start_addr = ucode_array_start_addr + 3717 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes); 3718 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes); 3719 adev->psp.rl.start_addr = ucode_array_start_addr + 3720 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes); 3721 } 3722 break; 3723 case 2: 3724 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data; 3725 3726 fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); 3727 3728 if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) { 3729 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n"); 3730 err = -EINVAL; 3731 goto out; 3732 } 3733 3734 if (sos_hdr_v2_0->header.header_version_minor == 1) { 3735 sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data; 3736 3737 fw_bin = sos_hdr_v2_1->psp_fw_bin; 3738 3739 if (psp_is_aux_sos_load_required(psp)) 3740 start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index); 3741 else 3742 fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index); 3743 3744 } else { 3745 fw_bin = sos_hdr_v2_0->psp_fw_bin; 3746 } 3747 3748 for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) { 3749 err = parse_sos_bin_descriptor(psp, fw_bin + fw_index, 3750 sos_hdr_v2_0); 3751 if (err) 3752 goto out; 3753 } 3754 break; 3755 default: 3756 dev_err(adev->dev, 3757 "unsupported psp sos firmware\n"); 3758 err = -EINVAL; 3759 goto out; 3760 } 3761 3762 return 0; 3763 out: 3764 amdgpu_ucode_release(&adev->psp.sos_fw); 3765 3766 return err; 3767 } 3768 3769 static bool is_ta_fw_applicable(struct psp_context *psp, 3770 const struct psp_fw_bin_desc *desc) 3771 { 3772 struct amdgpu_device *adev = psp->adev; 3773 uint32_t fw_version; 3774 3775 switch (desc->fw_type) { 3776 case TA_FW_TYPE_PSP_XGMI: 3777 case TA_FW_TYPE_PSP_XGMI_AUX: 3778 /* for now, AUX TA only exists on 13.0.6 ta bin, 3779 * from v20.00.0x.14 3780 */ 3781 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == 3782 IP_VERSION(13, 0, 6)) { 3783 fw_version = le32_to_cpu(desc->fw_version); 3784 3785 if (adev->flags & AMD_IS_APU && 3786 (fw_version & 0xff) >= 0x14) 3787 return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX; 3788 else 3789 return desc->fw_type == TA_FW_TYPE_PSP_XGMI; 3790 } 3791 break; 3792 default: 3793 break; 3794 } 3795 3796 return true; 3797 } 3798 3799 static int parse_ta_bin_descriptor(struct psp_context *psp, 3800 const struct psp_fw_bin_desc *desc, 3801 const struct ta_firmware_header_v2_0 *ta_hdr) 3802 { 3803 uint8_t *ucode_start_addr = NULL; 3804 3805 if (!psp || !desc || !ta_hdr) 3806 return -EINVAL; 3807 3808 if (!is_ta_fw_applicable(psp, desc)) 3809 return 0; 3810 3811 ucode_start_addr = (uint8_t *)ta_hdr + 3812 le32_to_cpu(desc->offset_bytes) + 3813 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3814 3815 switch (desc->fw_type) { 3816 case TA_FW_TYPE_PSP_ASD: 3817 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3818 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version); 3819 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3820 psp->asd_context.bin_desc.start_addr = ucode_start_addr; 3821 break; 3822 case TA_FW_TYPE_PSP_XGMI: 3823 case TA_FW_TYPE_PSP_XGMI_AUX: 3824 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3825 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3826 
		psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_RAS:
		psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
		psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
		psp->ras_context.context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_HDCP:
		psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
		psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
		psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_DTM:
		psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
		psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
		psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_RAP:
		psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
		psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
		psp->rap_context.context.bin_desc.start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_SECUREDISPLAY:
		psp->securedisplay_context.context.bin_desc.fw_version =
			le32_to_cpu(desc->fw_version);
		psp->securedisplay_context.context.bin_desc.size_bytes =
			le32_to_cpu(desc->size_bytes);
		psp->securedisplay_context.context.bin_desc.start_addr =
			ucode_start_addr;
		break;
	default:
		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
		break;
	}

	return 0;
}

static int parse_ta_v1_microcode(struct psp_context *psp)
{
	const struct ta_firmware_header_v1_0 *ta_hdr;
	struct amdgpu_device *adev = psp->adev;

	ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;

	if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
		return -EINVAL;

	adev->psp.xgmi_context.context.bin_desc.fw_version =
		le32_to_cpu(ta_hdr->xgmi.fw_version);
	adev->psp.xgmi_context.context.bin_desc.size_bytes =
		le32_to_cpu(ta_hdr->xgmi.size_bytes);
	adev->psp.xgmi_context.context.bin_desc.start_addr =
		(uint8_t *)ta_hdr +
		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);

	adev->psp.ras_context.context.bin_desc.fw_version =
		le32_to_cpu(ta_hdr->ras.fw_version);
	adev->psp.ras_context.context.bin_desc.size_bytes =
		le32_to_cpu(ta_hdr->ras.size_bytes);
	adev->psp.ras_context.context.bin_desc.start_addr =
		(uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
		le32_to_cpu(ta_hdr->ras.offset_bytes);

	adev->psp.hdcp_context.context.bin_desc.fw_version =
		le32_to_cpu(ta_hdr->hdcp.fw_version);
	adev->psp.hdcp_context.context.bin_desc.size_bytes =
		le32_to_cpu(ta_hdr->hdcp.size_bytes);
	adev->psp.hdcp_context.context.bin_desc.start_addr =
		(uint8_t *)ta_hdr +
		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);

	adev->psp.dtm_context.context.bin_desc.fw_version =
		le32_to_cpu(ta_hdr->dtm.fw_version);
	adev->psp.dtm_context.context.bin_desc.size_bytes =
		le32_to_cpu(ta_hdr->dtm.size_bytes);
	adev->psp.dtm_context.context.bin_desc.start_addr =
		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
		le32_to_cpu(ta_hdr->dtm.offset_bytes);

	adev->psp.securedisplay_context.context.bin_desc.fw_version =
		le32_to_cpu(ta_hdr->securedisplay.fw_version);
	adev->psp.securedisplay_context.context.bin_desc.size_bytes =
		le32_to_cpu(ta_hdr->securedisplay.size_bytes);
	adev->psp.securedisplay_context.context.bin_desc.start_addr =
		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
		le32_to_cpu(ta_hdr->securedisplay.offset_bytes);

	adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);

	return 0;
}

static int parse_ta_v2_microcode(struct psp_context *psp)
{
	const struct ta_firmware_header_v2_0 *ta_hdr;
	struct amdgpu_device *adev = psp->adev;
	int err = 0;
	int ta_index = 0;

	ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;

	if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
		return -EINVAL;

	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
		return -EINVAL;
	}

	for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
		err = parse_ta_bin_descriptor(psp,
					      &ta_hdr->ta_fw_bin[ta_index],
					      ta_hdr);
		if (err)
			return err;
	}

	return 0;
}

int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
{
	const struct common_firmware_header *hdr;
	struct amdgpu_device *adev = psp->adev;
	int err;

	if (amdgpu_is_kicker_fw(adev))
		err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s_ta_kicker.bin", chip_name);
	else
		err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s_ta.bin", chip_name);
	if (err)
		return err;

	hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
	switch (le16_to_cpu(hdr->header_version_major)) {
	case 1:
		err = parse_ta_v1_microcode(psp);
		break;
	case 2:
		err = parse_ta_v2_microcode(psp);
		break;
	default:
		dev_err(adev->dev, "unsupported TA header version\n");
		err = -EINVAL;
	}

	if (err)
		amdgpu_ucode_release(&adev->psp.ta_fw);

	return err;
}

int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
{
	struct amdgpu_device *adev = psp->adev;
	const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
	struct amdgpu_firmware_info *info = NULL;
	int err = 0;

	if (!amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
		return -EINVAL;
	}

	err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL,
				   "amdgpu/%s_cap.bin", chip_name);
	if (err) {
		if (err == -ENODEV) {
			dev_warn(adev->dev, "cap microcode does not exist, skip\n");
			err = 0;
		} else {
			dev_err(adev->dev, "failed to initialize cap microcode\n");
		}
		goto out;
	}

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
	info->ucode_id = AMDGPU_UCODE_ID_CAP;
	info->fw = adev->psp.cap_fw;
	cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
		adev->psp.cap_fw->data;
	adev->firmware.fw_size += ALIGN(
		le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
	adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
	adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
	adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);

	return 0;

out:
	amdgpu_ucode_release(&adev->psp.cap_fw);
	return err;
}

int psp_config_sq_perfmon(struct psp_context *psp,
			  uint32_t xcp_id, bool core_override_enable,
			  bool reg_override_enable, bool perfmon_override_enable)
{
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (xcp_id > MAX_XCP) {
		dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id);
		return -EINVAL;
	}

	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) {
		dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n",
			amdgpu_ip_version(psp->adev, MP0_HWIP, 0));
		return -EINVAL;
	}
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON;
	cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id);
	cmd->cmd.config_sq_perfmon.core_override = core_override_enable;
	cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable;
	cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (ret)
		dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n",
			 xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable);

	release_psp_cmd_buf(psp);
	return ret;
}

static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block,
				     enum amd_powergating_state state)
{
	return 0;
}

static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_ip_block *ip_block;
	uint32_t fw_ver;
	int ret;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
	if (!ip_block || !ip_block->status.late_initialized) {
		dev_info(adev->dev, "PSP block is not ready yet.\n");
		return -EBUSY;
	}

	mutex_lock(&adev->psp.mutex);
	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
	mutex_unlock(&adev->psp.mutex);

	if (ret) {
		dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
		return ret;
	}

	return sysfs_emit(buf, "%x\n", fw_ver);
}

static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret, idx;
	const struct firmware *usbc_pd_fw;
	struct amdgpu_bo *fw_buf_bo = NULL;
	uint64_t fw_pri_mc_addr;
	void *fw_pri_cpu_addr;
	struct amdgpu_ip_block *ip_block;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
	if (!ip_block || !ip_block->status.late_initialized) {
		dev_err(adev->dev, "PSP block is not ready yet.\n");
		return -EBUSY;
	}

	if (!drm_dev_enter(ddev, &idx))
		return -ENODEV;

	ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s", buf);
	if (ret)
		goto fail;

	/* LFB address which is aligned to 1MB boundary per PSP request */
	ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &fw_buf_bo, &fw_pri_mc_addr,
				      &fw_pri_cpu_addr);
	if (ret)
		goto rel_buf;

	memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);

	mutex_lock(&adev->psp.mutex);
	ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
	mutex_unlock(&adev->psp.mutex);

	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);

rel_buf:
	amdgpu_ucode_release(&usbc_pd_fw);
fail:
	if (ret) {
		dev_err(adev->dev, "Failed to load USBC PD FW, err = %d", ret);
		count = ret;
	}

	drm_dev_exit(idx);
	return count;
}

void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
{
	int idx;

	if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
		return;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, start_addr, bin_size);

	drm_dev_exit(idx);
}

/**
 * DOC: usbc_pd_fw
 * Reading from this file will retrieve the USB-C PD firmware version. Writing to
 * this file will trigger the update process.
 */
static DEVICE_ATTR(usbc_pd_fw, 0644,
		   psp_usbc_pd_fw_sysfs_read,
		   psp_usbc_pd_fw_sysfs_write);
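/*
 * Illustrative usage sketch for the attribute above (not part of the driver;
 * the exact sysfs path and firmware file name are examples and may differ per
 * system):
 *
 *   # read the current USB-C PD firmware version
 *   cat /sys/class/drm/card0/device/usbc_pd_fw
 *
 *   # update: place the image under /lib/firmware/amdgpu/ and write its name;
 *   # the name is passed to request_firmware() as "amdgpu/<name>"
 *   echo -n pd_fw.bin > /sys/class/drm/card0/device/usbc_pd_fw
 *
 * The attribute is only exposed when adev->psp.sup_pd_fw_up is set, see
 * amdgpu_flash_attr_is_visible() below.
 */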
int is_psp_fw_valid(struct psp_bin_desc bin)
{
	return bin.size_bytes;
}

static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
					const struct bin_attribute *bin_attr,
					char *buffer, loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	adev->psp.vbflash_done = false;

	/* Safeguard against memory drain */
	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
		dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
		kvfree(adev->psp.vbflash_tmp_buf);
		adev->psp.vbflash_tmp_buf = NULL;
		adev->psp.vbflash_image_size = 0;
		return -ENOMEM;
	}

	/* TODO Just allocate max for now and optimize to realloc later if needed */
	if (!adev->psp.vbflash_tmp_buf) {
		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
		if (!adev->psp.vbflash_tmp_buf)
			return -ENOMEM;
	}

	mutex_lock(&adev->psp.mutex);
	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
	adev->psp.vbflash_image_size += count;
	mutex_unlock(&adev->psp.mutex);

	dev_dbg(adev->dev, "IFWI staged for update\n");

	return count;
}

static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
				       const struct bin_attribute *bin_attr, char *buffer,
				       loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_bo *fw_buf_bo = NULL;
	uint64_t fw_pri_mc_addr;
	void *fw_pri_cpu_addr;
	int ret;

	if (adev->psp.vbflash_image_size == 0)
		return -EINVAL;

	dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");

	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &fw_buf_bo,
				      &fw_pri_mc_addr,
				      &fw_pri_cpu_addr);
	if (ret)
		goto rel_buf;

	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);

	mutex_lock(&adev->psp.mutex);
	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
	mutex_unlock(&adev->psp.mutex);

	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);

rel_buf:
	kvfree(adev->psp.vbflash_tmp_buf);
	adev->psp.vbflash_tmp_buf = NULL;
	adev->psp.vbflash_image_size = 0;

	if (ret) {
		dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
		return ret;
	}

	dev_dbg(adev->dev, "PSP IFWI flash process done\n");
	return 0;
}

/**
 * DOC: psp_vbflash
 * Writing to this file will stage an IFWI for update. Reading from this file
 * will trigger the update process.
 */
static const struct bin_attribute psp_vbflash_bin_attr = {
	.attr = {.name = "psp_vbflash", .mode = 0660},
	.size = 0,
	.write_new = amdgpu_psp_vbflash_write,
	.read_new = amdgpu_psp_vbflash_read,
};

/**
 * DOC: psp_vbflash_status
 * The status of the flash process.
 * 0: IFWI flash not complete.
 * 1: IFWI flash complete.
 */
static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t vbflash_status;

	vbflash_status = psp_vbflash_status(&adev->psp);
	if (!adev->psp.vbflash_done)
		vbflash_status = 0;
	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
		vbflash_status = 1;

	return sysfs_emit(buf, "0x%x\n", vbflash_status);
}
static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
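/*
 * Illustrative IFWI flash sequence using the two attributes above (sketch
 * only; the sysfs location depends on where amdgpu_flash_attr_group is
 * registered on a given platform, and <bdf> and ifwi_image.bin are
 * placeholders, not names provided by the driver):
 *
 *   # stage the IFWI image
 *   cat ifwi_image.bin > /sys/bus/pci/devices/<bdf>/psp_vbflash
 *
 *   # trigger the staged SPIROM update
 *   cat /sys/bus/pci/devices/<bdf>/psp_vbflash > /dev/null
 *
 *   # poll the result: 1 means the flash completed
 *   cat /sys/bus/pci/devices/<bdf>/psp_vbflash_status
 */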
static const struct bin_attribute *const bin_flash_attrs[] = {
	&psp_vbflash_bin_attr,
	NULL
};

static struct attribute *flash_attrs[] = {
	&dev_attr_psp_vbflash_status.attr,
	&dev_attr_usbc_pd_fw.attr,
	NULL
};

static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (attr == &dev_attr_usbc_pd_fw.attr)
		return adev->psp.sup_pd_fw_up ? 0660 : 0;

	return adev->psp.sup_ifwi_up ? 0440 : 0;
}

static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
						const struct bin_attribute *attr,
						int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return adev->psp.sup_ifwi_up ? 0660 : 0;
}

const struct attribute_group amdgpu_flash_attr_group = {
	.attrs = flash_attrs,
	.bin_attrs_new = bin_flash_attrs,
	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
	.is_visible = amdgpu_flash_attr_is_visible,
};

#if defined(CONFIG_DEBUG_FS)
static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct spirom_bo *bo_triplet;
	int ret;

	/* serialize the open() file calling */
	if (!mutex_trylock(&adev->psp.mutex))
		return -EBUSY;

	/*
	 * Make sure only one userspace process is alive for dumping so that
	 * only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE_B * 2 is consumed,
	 * e.g. when one process tries opening the file while another one has
	 * already proceeded to read or release. This also eliminates the need
	 * for a mutex in the read() and release() callbacks.
	 */
	if (adev->psp.spirom_dump_trip) {
		mutex_unlock(&adev->psp.mutex);
		return -EBUSY;
	}

	bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
	if (!bo_triplet) {
		mutex_unlock(&adev->psp.mutex);
		return -ENOMEM;
	}

	ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &bo_triplet->bo,
				      &bo_triplet->mc_addr,
				      &bo_triplet->cpu_addr);
	if (ret)
		goto rel_trip;

	ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
	if (ret)
		goto rel_bo;

	adev->psp.spirom_dump_trip = bo_triplet;
	mutex_unlock(&adev->psp.mutex);
	return 0;
rel_bo:
	amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
			      &bo_triplet->cpu_addr);
rel_trip:
	kfree(bo_triplet);
	mutex_unlock(&adev->psp.mutex);
	dev_err(adev->dev, "IFWI dump failed, err = %d\n", ret);
	return ret;
}

static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
					    loff_t *pos)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;

	if (!bo_triplet)
		return -EINVAL;

	return simple_read_from_buffer(buf,
				       size,
				       pos, bo_triplet->cpu_addr,
				       AMD_VBIOS_FILE_MAX_SIZE_B * 2);
}

static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;

	if (bo_triplet) {
		amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
				      &bo_triplet->cpu_addr);
		kfree(bo_triplet);
	}

	adev->psp.spirom_dump_trip = NULL;
	return 0;
}

static const struct file_operations psp_dump_spirom_debugfs_ops = {
	.owner = THIS_MODULE,
	.open = psp_read_spirom_debugfs_open,
	.read = psp_read_spirom_debugfs_read,
	.release = psp_read_spirom_debugfs_release,
	.llseek = default_llseek,
};
#endif

void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;

	debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
				 adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
#endif
}
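/*
 * Illustrative dump sequence for the debugfs node created above (sketch only;
 * the DRM minor index and debugfs mount point may differ per system, and
 * spirom_dump.bin is a placeholder name):
 *
 *   cat /sys/kernel/debug/dri/0/psp_spirom_dump > spirom_dump.bin
 *
 * open() triggers psp_dump_spirom() into a GTT buffer, read() copies that
 * buffer to userspace, and release() frees it, so a single cat captures up to
 * AMD_VBIOS_FILE_MAX_SIZE_B * 2 bytes of SPIROM content.
 */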
= "psp", 4450 .early_init = psp_early_init, 4451 .sw_init = psp_sw_init, 4452 .sw_fini = psp_sw_fini, 4453 .hw_init = psp_hw_init, 4454 .hw_fini = psp_hw_fini, 4455 .suspend = psp_suspend, 4456 .resume = psp_resume, 4457 .set_clockgating_state = psp_set_clockgating_state, 4458 .set_powergating_state = psp_set_powergating_state, 4459 }; 4460 4461 const struct amdgpu_ip_block_version psp_v3_1_ip_block = { 4462 .type = AMD_IP_BLOCK_TYPE_PSP, 4463 .major = 3, 4464 .minor = 1, 4465 .rev = 0, 4466 .funcs = &psp_ip_funcs, 4467 }; 4468 4469 const struct amdgpu_ip_block_version psp_v10_0_ip_block = { 4470 .type = AMD_IP_BLOCK_TYPE_PSP, 4471 .major = 10, 4472 .minor = 0, 4473 .rev = 0, 4474 .funcs = &psp_ip_funcs, 4475 }; 4476 4477 const struct amdgpu_ip_block_version psp_v11_0_ip_block = { 4478 .type = AMD_IP_BLOCK_TYPE_PSP, 4479 .major = 11, 4480 .minor = 0, 4481 .rev = 0, 4482 .funcs = &psp_ip_funcs, 4483 }; 4484 4485 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = { 4486 .type = AMD_IP_BLOCK_TYPE_PSP, 4487 .major = 11, 4488 .minor = 0, 4489 .rev = 8, 4490 .funcs = &psp_ip_funcs, 4491 }; 4492 4493 const struct amdgpu_ip_block_version psp_v12_0_ip_block = { 4494 .type = AMD_IP_BLOCK_TYPE_PSP, 4495 .major = 12, 4496 .minor = 0, 4497 .rev = 0, 4498 .funcs = &psp_ip_funcs, 4499 }; 4500 4501 const struct amdgpu_ip_block_version psp_v13_0_ip_block = { 4502 .type = AMD_IP_BLOCK_TYPE_PSP, 4503 .major = 13, 4504 .minor = 0, 4505 .rev = 0, 4506 .funcs = &psp_ip_funcs, 4507 }; 4508 4509 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = { 4510 .type = AMD_IP_BLOCK_TYPE_PSP, 4511 .major = 13, 4512 .minor = 0, 4513 .rev = 4, 4514 .funcs = &psp_ip_funcs, 4515 }; 4516 4517 const struct amdgpu_ip_block_version psp_v14_0_ip_block = { 4518 .type = AMD_IP_BLOCK_TYPE_PSP, 4519 .major = 14, 4520 .minor = 0, 4521 .rev = 0, 4522 .funcs = &psp_ip_funcs, 4523 }; 4524