/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Huang Rui
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "amdgpu_xgmi.h"
#include "soc15_common.h"
#include "psp_v3_1.h"
#include "psp_v10_0.h"
#include "psp_v11_0.h"
#include "psp_v11_0_8.h"
#include "psp_v12_0.h"
#include "psp_v13_0.h"
#include "psp_v13_0_4.h"
#include "psp_v14_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_securedisplay.h"
#include "amdgpu_atomfirmware.h"

#define AMD_VBIOS_FILE_MAX_SIZE_B      (1024*1024*16)

static int psp_load_smu_fw(struct psp_context *psp);
static int psp_rap_terminate(struct psp_context *psp);
static int psp_securedisplay_terminate(struct psp_context *psp);

static int psp_ring_init(struct psp_context *psp,
			 enum psp_ring_type ring_type)
{
	int ret = 0;
	struct psp_ring *ring;
	struct amdgpu_device *adev = psp->adev;

	ring = &psp->km_ring;

	ring->ring_type = ring_type;

	/* allocate 4k Page of Local Frame Buffer memory for ring */
	ring->ring_size = 0x1000;
	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->firmware.rbuf,
				      &ring->ring_mem_mc_addr,
				      (void **)&ring->ring_mem);
	if (ret) {
		ring->ring_size = 0;
		return ret;
	}

	return 0;
}

/*
 * Due to DF Cstate management centralized to PMFW, the firmware
 * loading sequence will be updated as below:
 *   - Load KDB
 *   - Load SYS_DRV
 *   - Load tOS
 *   - Load PMFW
 *   - Setup TMR
 *   - Load other non-psp fw
 *   - Load ASD
 *   - Load XGMI/RAS/HDCP/DTM TA if any
 *
 * This new sequence is required for
 *   - Arcturus and onwards
 */
static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;

	if (amdgpu_sriov_vf(adev)) {
		psp->pmfw_centralized_cstate_management = false;
		return;
	}

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 7):
		psp->pmfw_centralized_cstate_management = true;
		break;
	default:
		psp->pmfw_centralized_cstate_management = false;
		break;
	}
}

static int psp_init_sriov_microcode(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;
	char ucode_prefix[30];
	int ret = 0;

	amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 9):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 2):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		ret &= psp_init_ta_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 0):
		adev->virt.autoload_ucode_id = 0;
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 14):
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		ret &= psp_init_ta_microcode(psp, ucode_prefix);
		break;
	case IP_VERSION(13, 0, 10):
		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
		ret = psp_init_cap_microcode(psp, ucode_prefix);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

static int psp_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct psp_context *psp = &adev->psp;

	psp->autoload_supported = true;
	psp->boot_time_tmr = true;

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
		psp_v3_1_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		psp_v10_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
		psp_v11_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 7):
		adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
		fallthrough;
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		psp_v11_0_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		psp_v12_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(13, 0, 2):
		psp->boot_time_tmr = false;
		fallthrough;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 14):
		psp_v13_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case IP_VERSION(13, 0, 12):
		psp_v13_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
	case IP_VERSION(14, 0, 4):
		psp_v13_0_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(11, 0, 8):
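		/* only Cyan Skillfish 2 APUs have PSP functions hooked up here */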
		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
			psp_v11_0_8_set_psp_funcs(psp);
		}
		psp->autoload_supported = false;
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 10):
		psp_v13_0_set_psp_funcs(psp);
		adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(13, 0, 4):
		psp_v13_0_4_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
		psp_v14_0_set_psp_funcs(psp);
		break;
	case IP_VERSION(14, 0, 5):
		psp_v14_0_set_psp_funcs(psp);
		psp->boot_time_tmr = false;
		break;
	default:
		return -EINVAL;
	}

	psp->adev = adev;

	adev->psp_timeout = 20000;

	psp_check_pmfw_centralized_cstate_management(psp);

	if (amdgpu_sriov_vf(adev))
		return psp_init_sriov_microcode(psp);
	else
		return psp_init_microcode(psp);
}

void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
{
	amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
			      &mem_ctx->shared_buf);
	mem_ctx->shared_bo = NULL;
}

static void psp_free_shared_bufs(struct psp_context *psp)
{
	void *tmr_buf;
	void **pptr;

	/* free TMR memory buffer */
	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
	psp->tmr_bo = NULL;

	/* free xgmi shared memory */
	psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);

	/* free ras shared memory */
	psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);

	/* free hdcp shared memory */
	psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);

	/* free dtm shared memory */
	psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);

	/* free rap shared memory */
	psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);

	/* free securedisplay shared memory */
	psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
}

static void psp_memory_training_fini(struct psp_context *psp)
{
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	kfree(ctx->sys_cache);
	ctx->sys_cache = NULL;
}

static int psp_memory_training_init(struct psp_context *psp)
{
	int ret;
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
		dev_dbg(psp->adev->dev, "memory training is not supported!\n");
		return 0;
	}

	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
	if (ctx->sys_cache == NULL) {
		dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
		ret = -ENOMEM;
		goto Err_out;
	}

	dev_dbg(psp->adev->dev,
		"train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
		ctx->train_data_size,
		ctx->p2c_train_data_offset,
		ctx->c2p_train_data_offset);
	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
	return 0;

Err_out:
	psp_memory_training_fini(psp);
	return ret;
}

/*
 * Helper function to query psp runtime database entry
 *
 * @adev: amdgpu_device pointer
 * @entry_type: the type of psp runtime database entry
 * @db_entry: runtime database entry pointer
 *
 * Return false if runtime database doesn't exist or entry is invalid,
 * or true if the specific database entry is found and copied to @db_entry
 */
static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
				     enum psp_runtime_entry_type entry_type,
				     void *db_entry)
{
	uint64_t db_header_pos, db_dir_pos;
	struct psp_runtime_data_header db_header = {0};
	struct psp_runtime_data_directory db_dir = {0};
	bool ret = false;
	int i;

	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
	    amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
		return false;

	db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
	db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);

	/* read runtime db header from vram */
	amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
				  sizeof(struct psp_runtime_data_header), false);

	if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
		/* runtime db doesn't exist, exit */
		dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
		return false;
	}

	/* read runtime database entry from vram */
	amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
				  sizeof(struct psp_runtime_data_directory), false);

	if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
		/* invalid db entry count, exit */
		dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
		return false;
	}

	/* look up for requested entry type */
	for (i = 0; i < db_dir.entry_count && !ret; i++) {
		if (db_dir.entry_list[i].entry_type == entry_type) {
			switch (entry_type) {
			case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
					/* invalid db entry size */
					dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
					return false;
				}
				/* read runtime database entry */
				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
							  (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
				ret = true;
				break;
			case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
				if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
					/* invalid db entry size */
					dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
					return false;
				}
				/* read runtime database entry */
				amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
							  (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
				ret = true;
				break;
			default:
				ret = false;
				break;
			}
		}
	}

	return ret;
}

static int psp_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct psp_context *psp = &adev->psp;
	int ret;
	struct psp_runtime_boot_cfg_entry boot_cfg_entry;
	struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
	struct psp_runtime_scpm_entry scpm_entry;

	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!psp->cmd) {
		dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
		ret = -ENOMEM;
	}

	adev->psp.xgmi_context.supports_extended_data =
		!adev->gmc.xgmi.connected_to_cpu &&
		amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);

	memset(&scpm_entry, 0, sizeof(scpm_entry));
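	/* Read the SCPM (pptable error status) entry from the PSP runtime
	 * database; a missing or disabled entry is treated as SCPM disabled.
	 */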
	if ((psp_get_runtime_db_entry(adev,
				      PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
				      &scpm_entry)) &&
	    (scpm_entry.scpm_status != SCPM_DISABLE)) {
		adev->scpm_enabled = true;
		adev->scpm_status = scpm_entry.scpm_status;
	} else {
		adev->scpm_enabled = false;
		adev->scpm_status = SCPM_DISABLE;
	}

	/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */

	memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
	if (psp_get_runtime_db_entry(adev,
				     PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
				     &boot_cfg_entry)) {
		psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
		if ((psp->boot_cfg_bitmask) &
		    BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
			/* If psp runtime database exists, then
			 * only enable two stage memory training
			 * when TWO_STAGE_DRAM_TRAINING bit is set
			 * in runtime database
			 */
			mem_training_ctx->enable_mem_training = true;
		}

	} else {
		/* If psp runtime database doesn't exist or is
		 * invalid, force enable two stage memory training
		 */
		mem_training_ctx->enable_mem_training = true;
	}

	if (mem_training_ctx->enable_mem_training) {
		ret = psp_memory_training_init(psp);
		if (ret) {
			dev_err(adev->dev, "Failed to initialize memory training!\n");
			return ret;
		}

		ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
		if (ret) {
			dev_err(adev->dev, "Failed to process memory training!\n");
			return ret;
		}
	}

	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
				      (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
				      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fw_pri_bo,
				      &psp->fw_pri_mc_addr,
				      &psp->fw_pri_buf);
	if (ret)
		return ret;

	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fence_buf_bo,
				      &psp->fence_buf_mc_addr,
				      &psp->fence_buf);
	if (ret)
		goto failed1;

	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
				      (void **)&psp->cmd_buf_mem);
	if (ret)
		goto failed2;

	return 0;

failed2:
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
failed1:
	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	return ret;
}

static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct psp_context *psp = &adev->psp;

	psp_memory_training_fini(psp);

	amdgpu_ucode_release(&psp->sos_fw);
	amdgpu_ucode_release(&psp->asd_fw);
	amdgpu_ucode_release(&psp->ta_fw);
	amdgpu_ucode_release(&psp->cap_fw);
	amdgpu_ucode_release(&psp->toc_fw);

	kfree(psp->cmd);
	psp->cmd = NULL;

	psp_free_shared_bufs(psp);

	if (psp->km_ring.ring_mem)
		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
				      &psp->km_ring.ring_mem_mc_addr,
				      (void **)&psp->km_ring.ring_mem);

	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
			      (void **)&psp->cmd_buf_mem);

	return 0;
}

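/*
 * psp_wait_for - poll a PSP register until it reaches the expected state
 *
 * Polls @reg_index once per microsecond, up to adev->usec_timeout. When
 * @check_changed is set, success means the value differs from @reg_val;
 * otherwise success means (value & @mask) == @reg_val. Returns 0 on
 * success or -ETIME on timeout.
 */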
int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
		 uint32_t reg_val, uint32_t mask, bool check_changed)
{
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	if (psp->adev->no_hw_access)
		return 0;

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32(reg_index);
		if (check_changed) {
			if (val != reg_val)
				return 0;
		} else {
			if ((val & mask) == reg_val)
				return 0;
		}
		udelay(1);
	}

	return -ETIME;
}

int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
			       uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
{
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	if (psp->adev->no_hw_access)
		return 0;

	for (i = 0; i < msec_timeout; i++) {
		val = RREG32(reg_index);
		if ((val & mask) == reg_val)
			return 0;
		msleep(1);
	}

	return -ETIME;
}

static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
{
	switch (cmd_id) {
	case GFX_CMD_ID_LOAD_TA:
		return "LOAD_TA";
	case GFX_CMD_ID_UNLOAD_TA:
		return "UNLOAD_TA";
	case GFX_CMD_ID_INVOKE_CMD:
		return "INVOKE_CMD";
	case GFX_CMD_ID_LOAD_ASD:
		return "LOAD_ASD";
	case GFX_CMD_ID_SETUP_TMR:
		return "SETUP_TMR";
	case GFX_CMD_ID_LOAD_IP_FW:
		return "LOAD_IP_FW";
	case GFX_CMD_ID_DESTROY_TMR:
		return "DESTROY_TMR";
	case GFX_CMD_ID_SAVE_RESTORE:
		return "SAVE_RESTORE_IP_FW";
	case GFX_CMD_ID_SETUP_VMR:
		return "SETUP_VMR";
	case GFX_CMD_ID_DESTROY_VMR:
		return "DESTROY_VMR";
	case GFX_CMD_ID_PROG_REG:
		return "PROG_REG";
	case GFX_CMD_ID_GET_FW_ATTESTATION:
		return "GET_FW_ATTESTATION";
	case GFX_CMD_ID_LOAD_TOC:
		return "ID_LOAD_TOC";
	case GFX_CMD_ID_AUTOLOAD_RLC:
		return "AUTOLOAD_RLC";
	case GFX_CMD_ID_BOOT_CFG:
		return "BOOT_CFG";
	case GFX_CMD_ID_CONFIG_SQ_PERFMON:
		return "CONFIG_SQ_PERFMON";
	default:
		return "UNKNOWN CMD";
	}
}

static bool psp_err_warn(struct psp_context *psp)
{
	struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;

	/* This response indicates reg list is already loaded */
	if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
	    cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
	    cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
	    cmd->resp.status == TEE_ERROR_CANCEL)
		return false;

	return true;
}

static int
psp_cmd_submit_buf(struct psp_context *psp,
		   struct amdgpu_firmware_info *ucode,
		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
	int ret;
	int index;
	int timeout = psp->adev->psp_timeout;
	bool ras_intr = false;
	bool skip_unsupport = false;

	if (psp->adev->no_hw_access)
		return 0;

	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);

	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));

	index = atomic_inc_return(&psp->fence_value);
	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
	if (ret) {
		atomic_dec(&psp->fence_value);
		goto exit;
	}

	amdgpu_device_invalidate_hdp(psp->adev, NULL);
	while (*((unsigned int *)psp->fence_buf) != index) {
		if (--timeout == 0)
			break;
		/*
		 * Shouldn't wait for timeout when err_event_athub occurs,
		 * because the gpu reset thread has been triggered and the
		 * lock resource should be released for the psp resume
		 * sequence.
		 */
		ras_intr = amdgpu_ras_intr_triggered();
		if (ras_intr)
			break;
		usleep_range(10, 100);
		amdgpu_device_invalidate_hdp(psp->adev, NULL);
	}

	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
		psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);

	memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));

	/* In some cases, the psp response status is not 0 even though there is
	 * no problem with the submitted command; some versions of PSP FW
	 * don't write 0 to that field.
	 * So here we only print a warning instead of an error during psp
	 * initialization to avoid breaking hw_init, and we don't return
	 * -EINVAL in that case.
	 */
	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
		if (ucode)
			dev_warn(psp->adev->dev,
				 "failed to load ucode %s(0x%X) ",
				 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
		if (psp_err_warn(psp))
			dev_warn(
				psp->adev->dev,
				"psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
				psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
				psp->cmd_buf_mem->cmd_id,
				psp->cmd_buf_mem->resp.status);
		/* If any firmware (including CAP) load fails under SRIOV, it should
		 * return failure to stop the VF from initializing.
		 * Also return failure in case of timeout
		 */
		if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
			ret = -EINVAL;
			goto exit;
		}
	}

	if (ucode) {
		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
	}

exit:
	return ret;
}

static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
{
	struct psp_gfx_cmd_resp *cmd = psp->cmd;

	mutex_lock(&psp->mutex);

	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));

	return cmd;
}

static void release_psp_cmd_buf(struct psp_context *psp)
{
	mutex_unlock(&psp->mutex);
}

static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
				 struct psp_gfx_cmd_resp *cmd,
				 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
{
	struct amdgpu_device *adev = psp->adev;
	uint32_t size = 0;
	uint64_t tmr_pa = 0;

	if (tmr_bo) {
		size = amdgpu_bo_size(tmr_bo);
		tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
	}

	if (amdgpu_sriov_vf(psp->adev))
		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_size = size;
	cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
	cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
	cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
}

static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint64_t pri_buf_mc, uint32_t size)
{
	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_size = size;
}

/* Issue LOAD TOC cmd to PSP to parse the toc and calculate the tmr size needed */
static int psp_load_toc(struct psp_context *psp,
			uint32_t *tmr_size)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	/* Copy toc to psp firmware private buffer */
	psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);

	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (!ret)
		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;

	release_psp_cmd_buf(psp);

	return ret;
}

/* Set up Trusted Memory Region */
static int psp_tmr_init(struct psp_context *psp)
{
	int ret = 0;
	int tmr_size;
	void *tmr_buf;
	void **pptr;

	/*
	 * According to the HW engineers, the TMR address should be
	 * "naturally aligned", i.e. the start address is an integer
	 * multiple of the TMR size.
	 *
	 * Note: this memory needs to stay reserved until the driver
	 * is uninitialized.
	 */
	tmr_size = PSP_TMR_SIZE(psp->adev);

	/* For ASICs that support RLC autoload, psp will parse the toc
	 * and calculate the total size of TMR needed
	 */
	if (!amdgpu_sriov_vf(psp->adev) &&
	    psp->toc.start_addr &&
	    psp->toc.size_bytes &&
	    psp->fw_pri_buf) {
		ret = psp_load_toc(psp, &tmr_size);
		if (ret) {
			dev_err(psp->adev->dev, "Failed to load toc\n");
			return ret;
		}
	}

	if (!psp->tmr_bo && !psp->boot_time_tmr) {
		pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
		ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
					      PSP_TMR_ALIGNMENT,
					      AMDGPU_HAS_VRAM(psp->adev) ?
					      AMDGPU_GEM_DOMAIN_VRAM :
					      AMDGPU_GEM_DOMAIN_GTT,
					      &psp->tmr_bo, &psp->tmr_mc_addr,
					      pptr);
	}

	return ret;
}

static bool psp_skip_tmr(struct psp_context *psp)
{
	switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 12):
	case IP_VERSION(13, 0, 14):
		return true;
	default:
		return false;
	}
}

static int psp_tmr_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
	 * It is already set up by the host driver.
	 */
	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
	if (psp->tmr_bo)
		dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
					struct psp_gfx_cmd_resp *cmd)
{
	if (amdgpu_sriov_vf(psp->adev))
		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
}

static int psp_tmr_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
	 * as TMR is not loaded at all
	 */
	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_tmr_unload_cmd_buf(psp, cmd);
	dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_tmr_terminate(struct psp_context *psp)
{
	return psp_tmr_unload(psp);
}

int psp_get_fw_attestation_records_addr(struct psp_context *psp,
					uint64_t *output_ptr)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	if (!output_ptr)
		return -EINVAL;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	if (!ret) {
		*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
			      ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
	}

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
{
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (!ret) {
		*boot_cfg =
			(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
	}

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
{
	int ret;
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;

	if (amdgpu_sriov_vf(adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
	cmd->cmd.boot_cfg.boot_config = boot_cfg;
	cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_rl_load(struct amdgpu_device *adev)
{
	int ret;
	struct psp_context *psp = &adev->psp;
	struct psp_gfx_cmd_resp *cmd;

	if (!is_psp_fw_valid(psp->rl))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);

	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
	cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_memory_partition(struct psp_context *psp, int mode)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE;
	cmd->cmd.cmd_memory_part.mode = mode;

	dev_info(psp->adev->dev,
		 "Requesting %d memory partition change through PSP", mode);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (ret)
		dev_err(psp->adev->dev,
			"PSP request failed to change to NPS%d mode\n", mode);

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_spatial_partition(struct psp_context *psp, int mode)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = acquire_psp_cmd_buf(psp);

	cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
	cmd->cmd.cmd_spatial_part.mode = mode;

	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_asd_initialize(struct psp_context *psp)
{
	int ret;

	/* If the PSP version doesn't match the ASD version, ASD loading will fail.
	 * Add a workaround to bypass it for sriov for now.
	 * TODO: add version check to make it common
	 */
	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
		return 0;

	/* bypass asd if display hardware is not available */
	if (!amdgpu_device_has_display_hardware(psp->adev) &&
	    amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
		return 0;

	psp->asd_context.mem_context.shared_mc_addr = 0;
	psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
	psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;

	ret = psp_ta_load(psp, &psp->asd_context);
	if (!ret)
		psp->asd_context.initialized = true;

	return ret;
}

static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
	cmd->cmd.cmd_unload_ta.session_id = session_id;
}

int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	psp_prep_ta_unload_cmd_buf(cmd, context->session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	release_psp_cmd_buf(psp);

	return ret;
}

static int psp_asd_terminate(struct psp_context *psp)
{
	int ret;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->asd_context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->asd_context);
	if (!ret)
		psp->asd_context.initialized = false;

	return ret;
}

static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint32_t id, uint32_t value)
{
	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
}

int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
		    uint32_t value)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret = 0;

	if (reg >= PSP_REG_LAST)
		return -EINVAL;

	cmd = acquire_psp_cmd_buf(psp);

	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (ret)
		dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);

	release_psp_cmd_buf(psp);

	return ret;
}

static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				     uint64_t ta_bin_mc,
				     struct ta_context *context)
{
	cmd->cmd_id = context->ta_load_type;
	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;

	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
		lower_32_bits(context->mem_context.shared_mc_addr);
	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
		upper_32_bits(context->mem_context.shared_mc_addr);
	cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
}

int psp_ta_init_shared_buf(struct psp_context *psp,
			   struct ta_mem_context *mem_ctx)
{
	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for ta to host memory
	 */
	return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT,
				       &mem_ctx->shared_bo,
				       &mem_ctx->shared_mc_addr,
				       &mem_ctx->shared_buf);
}

static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t ta_cmd_id,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
	cmd->cmd.cmd_invoke_cmd.session_id = session_id;
	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
}

int psp_ta_invoke(struct psp_context *psp,
		  uint32_t ta_cmd_id,
		  struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_ta_load(struct psp_context *psp, struct ta_context *context)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = acquire_psp_cmd_buf(psp);

	psp_copy_fw(psp, context->bin_desc.start_addr,
		    context->bin_desc.size_bytes);

	psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	context->resp_status = cmd->resp.status;

	if (!ret)
		context->session_id = cmd->resp.session_id;

	release_psp_cmd_buf(psp);

	return ret;
}

int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
}

int psp_xgmi_terminate(struct psp_context *psp)
{
	int ret;
	struct amdgpu_device *adev = psp->adev;

	/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
	    (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
	     adev->gmc.xgmi.connected_to_cpu))
		return 0;

	if (!psp->xgmi_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->xgmi_context.context);

	psp->xgmi_context.context.initialized = false;

	return ret;
}

int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	if (!psp->ta_fw ||
	    !psp->xgmi_context.context.bin_desc.size_bytes ||
	    !psp->xgmi_context.context.bin_desc.start_addr)
		return -ENOENT;

	if (!load_ta)
		goto invoke;

	psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
	psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	if (!psp->xgmi_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
		if (ret)
			return ret;
	}

	/* Load XGMI TA */
	ret = psp_ta_load(psp, &psp->xgmi_context.context);
	if (!ret)
		psp->xgmi_context.context.initialized = true;
	else
		return ret;

invoke:
	/* Initialize XGMI session */
	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
	xgmi_cmd->flag_extend_link_record = set_extended_data;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;

	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	/* note down the capability flag for the XGMI TA */
	psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;

	return ret;
}

int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;

	/* Invoke xgmi ta to get hive id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	if (ret)
		return ret;

	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;

	return 0;
}

int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;

	/* Invoke xgmi ta to get the node id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	if (ret)
		return ret;

	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;

	return 0;
}

static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
{
	return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
		IP_VERSION(13, 0, 2) &&
		psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
		amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
		IP_VERSION(13, 0, 6);
}

/*
 * Chips that support extended topology information require the driver to
 * reflect topology information in the opposite direction. This is
 * because the TA has already exceeded its link record limit and if the
 * TA holds bi-directional information, the driver would have to do
 * multiple fetches instead of just two.
 */
static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
					   struct psp_xgmi_node_info node_info)
{
	struct amdgpu_device *mirror_adev;
	struct amdgpu_hive_info *hive;
	uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
	uint64_t dst_node_id = node_info.node_id;
	uint8_t dst_num_hops = node_info.num_hops;
	uint8_t dst_num_links = node_info.num_links;

	hive = amdgpu_get_xgmi_hive(psp->adev);
	if (WARN_ON(!hive))
		return;

	list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
		struct psp_xgmi_topology_info *mirror_top_info;
		int j;

		if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
			continue;

		mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
		for (j = 0; j < mirror_top_info->num_nodes; j++) {
			if (mirror_top_info->nodes[j].node_id != src_node_id)
				continue;

			mirror_top_info->nodes[j].num_hops = dst_num_hops;
			/*
			 * prevent 0 num_links value re-reflection since reflection
			 * criteria is based on num_hops (direct or indirect).
			 */
			if (dst_num_links)
				mirror_top_info->nodes[j].num_links = dst_num_links;

			break;
		}

		break;
	}

	amdgpu_put_xgmi_hive(hive);
}

int psp_xgmi_get_topology_info(struct psp_context *psp,
			       int number_devices,
			       struct psp_xgmi_topology_info *topology,
			       bool get_extended_data)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
	int i;
	int ret;

	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
	xgmi_cmd->flag_extend_link_record = get_extended_data;

	/* Fill in the shared memory with topology information as input */
	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
	topology_info_input->num_nodes = number_devices;

	for (i = 0; i < topology_info_input->num_nodes; i++) {
		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
	}

	/* Invoke xgmi ta to get the topology information */
	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
	if (ret)
		return ret;

	/* Read the output topology information from the shared memory */
	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
	for (i = 0; i < topology->num_nodes; i++) {
		/* extended data will either be 0 or equal to non-extended data */
		if (topology_info_output->nodes[i].num_hops)
			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;

		/* non-extended data gets everything here so no need to update */
		if (!get_extended_data) {
			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
			topology->nodes[i].is_sharing_enabled =
				topology_info_output->nodes[i].is_sharing_enabled;
			topology->nodes[i].sdma_engine =
				topology_info_output->nodes[i].sdma_engine;
		}
	}

	/* Invoke xgmi ta again to get the link information */
	if (psp_xgmi_peer_link_info_supported(psp)) {
		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
		bool requires_reflection =
			(psp->xgmi_context.supports_extended_data &&
			 get_extended_data) ||
			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
			IP_VERSION(13, 0, 6) ||
			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
			IP_VERSION(13, 0, 14);
		bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
			psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;

		/* populate the shared output buffer rather than the cmd input buffer
		 * with node_ids as the input for GET_PEER_LINKS command execution.
		 * This is required by the xgmi ta implementation of GET_PEER_LINKS.
		 * The same requirement applies to the GET_EXTEND_PEER_LINKS command.
		 */
		if (ta_port_num_support) {
			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;

			for (i = 0; i < topology->num_nodes; i++)
				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;

			link_extend_info_output->num_nodes = topology->num_nodes;
			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
		} else {
			link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;

			for (i = 0; i < topology->num_nodes; i++)
				link_info_output->nodes[i].node_id = topology->nodes[i].node_id;

			link_info_output->num_nodes = topology->num_nodes;
			xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
		}

		ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
		if (ret)
			return ret;

		for (i = 0; i < topology->num_nodes; i++) {
			uint8_t node_num_links = ta_port_num_support ?
				link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
			/* accumulate num_links on extended data */
			if (get_extended_data) {
				topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
			} else {
				topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
						topology->nodes[i].num_links : node_num_links;
			}
			/* populate the connected port num info if supported and available */
			if (ta_port_num_support && topology->nodes[i].num_links) {
				memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
				       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
			}

			/* reflect the topology information for bi-directionality */
			if (requires_reflection && topology->nodes[i].num_hops)
				psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
		}
	}

	return 0;
}

int psp_xgmi_set_topology_info(struct psp_context *psp,
			       int number_devices,
			       struct psp_xgmi_topology_info *topology)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	int i;

	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
	topology_info_input->num_nodes = number_devices;

	for (i = 0; i < topology_info_input->num_nodes; i++) {
		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
		topology_info_input->nodes[i].is_sharing_enabled = 1;
		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
	}

	/* Invoke xgmi ta to set topology information */
	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
}

// ras begin
static void psp_ras_ta_check_status(struct psp_context *psp)
{
	struct ta_ras_shared_memory *ras_cmd =
		(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;

	switch (ras_cmd->ras_status) {
	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: cmd failed due to unsupported ip\n");
		break;
	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: cmd failed due to unsupported error injection\n");
		break;
	case TA_RAS_STATUS__SUCCESS:
		break;
	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
			dev_warn(psp->adev->dev,
				 "RAS WARNING: Inject error to critical region is not allowed\n");
		break;
	default:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
		break;
	}
}

static int psp_ras_send_cmd(struct psp_context *psp,
			    enum ras_command cmd_id, void *in, void *out)
{
	struct ta_ras_shared_memory *ras_cmd;
	uint32_t cmd = cmd_id;
	int ret = 0;

	if (!in)
		return -EINVAL;

	mutex_lock(&psp->ras_context.mutex);
	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	switch (cmd) {
	case TA_RAS_COMMAND__ENABLE_FEATURES:
	case TA_RAS_COMMAND__DISABLE_FEATURES:
		memcpy(&ras_cmd->ras_in_message,
		       in, sizeof(ras_cmd->ras_in_message));
		break;
	case TA_RAS_COMMAND__TRIGGER_ERROR:
		memcpy(&ras_cmd->ras_in_message.trigger_error,
		       in, sizeof(ras_cmd->ras_in_message.trigger_error));
		break;
	case TA_RAS_COMMAND__QUERY_ADDRESS:
		memcpy(&ras_cmd->ras_in_message.address,
		       in, sizeof(ras_cmd->ras_in_message.address));
		break;
	default:
		dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
		ret = -EINVAL;
		goto err_out;
	}

	ras_cmd->cmd_id = cmd;
	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);

	switch (cmd) {
	case TA_RAS_COMMAND__TRIGGER_ERROR:
		if (!ret && out)
			memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
		break;
	case TA_RAS_COMMAND__QUERY_ADDRESS:
		if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
			ret = -EINVAL;
		else if (out)
			memcpy(out,
			       &ras_cmd->ras_out_message.address,
			       sizeof(ras_cmd->ras_out_message.address));
		break;
	default:
		break;
	}

err_out:
	mutex_unlock(&psp->ras_context.mutex);

	return ret;
}

int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);

	if (amdgpu_ras_intr_triggered())
		return ret;

	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
		dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
		return -EINVAL;
	}

	if (!ret) {
		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
			dev_warn(psp->adev->dev, "ECC switch disabled\n");

			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
			dev_warn(psp->adev->dev,
				 "RAS internal register access blocked\n");

		psp_ras_ta_check_status(psp);
	}

	return ret;
}

int psp_ras_enable_features(struct psp_context *psp,
			    union ta_ras_cmd_input *info, bool enable)
{
	enum ras_command cmd_id;
	int ret;

	if (!psp->ras_context.context.initialized || !info)
		return -EINVAL;

	cmd_id = enable ?
		TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES;
	ret = psp_ras_send_cmd(psp, cmd_id, info, NULL);
	if (ret)
		return -EINVAL;

	return 0;
}

int psp_ras_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->ras_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->ras_context.context);

	psp->ras_context.context.initialized = false;

	mutex_destroy(&psp->ras_context.mutex);

	return ret;
}

int psp_ras_initialize(struct psp_context *psp)
{
	int ret;
	uint32_t boot_cfg = 0xFF;
	struct amdgpu_device *adev = psp->adev;
	struct ta_ras_shared_memory *ras_cmd;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
	    !adev->psp.ras_context.context.bin_desc.start_addr) {
		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
		return 0;
	}

	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
		/* query GECC enablement status from boot config
		 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
		 */
		ret = psp_boot_config_get(adev, &boot_cfg);
		if (ret)
			dev_warn(adev->dev, "PSP get boot config failed\n");

		if (boot_cfg == 1 && !adev->ras_default_ecc_enabled &&
		    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
			dev_warn(adev->dev, "GECC is currently enabled, which may affect performance\n");
			dev_warn(adev->dev,
				 "To disable GECC, please reboot the system and load the amdgpu driver with the parameter amdgpu_ras_enable=0\n");
		} else {
			if ((adev->ras_default_ecc_enabled || amdgpu_ras_enable == 1) &&
			    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
				if (boot_cfg == 1) {
					dev_info(adev->dev, "GECC is enabled\n");
				} else {
					/* enable GECC in next boot cycle if it is disabled
					 * in boot config, or force enable GECC if failed to
					 * get boot configuration
					 */
					ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
					if (ret)
						dev_warn(adev->dev, "PSP set boot config failed\n");
					else
						dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
				}
			} else {
				if (!boot_cfg) {
					if (!adev->ras_default_ecc_enabled &&
					    amdgpu_ras_enable != 1 &&
					    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
						dev_warn(adev->dev, "GECC is disabled, set amdgpu_ras_enable=1 to enable GECC in next boot cycle if needed\n");
					else
						dev_info(adev->dev, "GECC is disabled\n");
				} else {
					/* disable GECC in next boot cycle if ras is
					 * disabled by module parameter amdgpu_ras_enable
					 * and/or amdgpu_ras_mask, or boot_config_get call
					 * is failed
					 */
					ret = psp_boot_config_set(adev, 0);
					if (ret)
						dev_warn(adev->dev, "PSP set boot config failed\n");
					else
						dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
				}
			}
		}
	}

	psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
	psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

(!psp->ras_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
		if (ret)
			return ret;
	}

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	if (amdgpu_ras_is_poison_mode_supported(adev))
		ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
		ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
	ras_cmd->ras_in_message.init_flags.xcc_mask =
		adev->gfx.xcc_mask;
	ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		ras_cmd->ras_in_message.init_flags.nps_mode =
			adev->gmc.gmc_funcs->query_mem_partition_mode(adev);

	ret = psp_ta_load(psp, &psp->ras_context.context);

	if (!ret && !ras_cmd->ras_status) {
		psp->ras_context.context.initialized = true;
		mutex_init(&psp->ras_context.mutex);
	} else {
		if (ras_cmd->ras_status)
			dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);

		/* failed to load RAS TA */
		psp->ras_context.context.initialized = false;
	}

	return ret;
}

int psp_ras_trigger_error(struct psp_context *psp,
			  struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
{
	struct amdgpu_device *adev = psp->adev;
	int ret;
	uint32_t dev_mask;
	uint32_t ras_status = 0;

	if (!psp->ras_context.context.initialized || !info)
		return -EINVAL;

	switch (info->block_id) {
	case TA_RAS_BLOCK__GFX:
		dev_mask = GET_MASK(GC, instance_mask);
		break;
	case TA_RAS_BLOCK__SDMA:
		dev_mask = GET_MASK(SDMA0, instance_mask);
		break;
	case TA_RAS_BLOCK__VCN:
	case TA_RAS_BLOCK__JPEG:
		dev_mask = GET_MASK(VCN, instance_mask);
		break;
	default:
		dev_mask = instance_mask;
		break;
	}

	/* reuse sub_block_index for backward compatibility */
	dev_mask <<= AMDGPU_RAS_INST_SHIFT;
	dev_mask &= AMDGPU_RAS_INST_MASK;
	info->sub_block_index |= dev_mask;

	ret = psp_ras_send_cmd(psp,
			       TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
	if (ret)
		return -EINVAL;

	/* If err_event_athub occurs, the error injection was successful;
	 * however, the return status from the TA is no longer reliable.
	 */
	if (amdgpu_ras_intr_triggered())
		return 0;

	if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
		return -EACCES;
	else if (ras_status)
		return -EINVAL;

	return 0;
}

int psp_ras_query_address(struct psp_context *psp,
			  struct ta_ras_query_address_input *addr_in,
			  struct ta_ras_query_address_output *addr_out)
{
	int ret;

	if (!psp->ras_context.context.initialized ||
	    !addr_in || !addr_out)
		return -EINVAL;

	ret = psp_ras_send_cmd(psp,
			       TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out);

	return ret;
}
// ras end

// HDCP start
static int psp_hdcp_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	/* bypass hdcp initialization if dmu is harvested */
	if (!amdgpu_device_has_display_hardware(psp->adev))
		return 0;

	if
(!psp->hdcp_context.context.bin_desc.size_bytes || 1965 !psp->hdcp_context.context.bin_desc.start_addr) { 1966 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n"); 1967 return 0; 1968 } 1969 1970 psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE; 1971 psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1972 1973 if (!psp->hdcp_context.context.mem_context.shared_buf) { 1974 ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context); 1975 if (ret) 1976 return ret; 1977 } 1978 1979 ret = psp_ta_load(psp, &psp->hdcp_context.context); 1980 if (!ret) { 1981 psp->hdcp_context.context.initialized = true; 1982 mutex_init(&psp->hdcp_context.mutex); 1983 } 1984 1985 return ret; 1986 } 1987 1988 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1989 { 1990 /* 1991 * TODO: bypass the loading in sriov for now 1992 */ 1993 if (amdgpu_sriov_vf(psp->adev)) 1994 return 0; 1995 1996 if (!psp->hdcp_context.context.initialized) 1997 return 0; 1998 1999 return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context); 2000 } 2001 2002 static int psp_hdcp_terminate(struct psp_context *psp) 2003 { 2004 int ret; 2005 2006 /* 2007 * TODO: bypass the terminate in sriov for now 2008 */ 2009 if (amdgpu_sriov_vf(psp->adev)) 2010 return 0; 2011 2012 if (!psp->hdcp_context.context.initialized) 2013 return 0; 2014 2015 ret = psp_ta_unload(psp, &psp->hdcp_context.context); 2016 2017 psp->hdcp_context.context.initialized = false; 2018 2019 return ret; 2020 } 2021 // HDCP end 2022 2023 // DTM start 2024 static int psp_dtm_initialize(struct psp_context *psp) 2025 { 2026 int ret; 2027 2028 /* 2029 * TODO: bypass the initialize in sriov for now 2030 */ 2031 if (amdgpu_sriov_vf(psp->adev)) 2032 return 0; 2033 2034 /* bypass dtm initialization if dmu is harvested */ 2035 if (!amdgpu_device_has_display_hardware(psp->adev)) 2036 return 0; 2037 2038 if (!psp->dtm_context.context.bin_desc.size_bytes || 2039 !psp->dtm_context.context.bin_desc.start_addr) { 2040 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n"); 2041 return 0; 2042 } 2043 2044 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE; 2045 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2046 2047 if (!psp->dtm_context.context.mem_context.shared_buf) { 2048 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context); 2049 if (ret) 2050 return ret; 2051 } 2052 2053 ret = psp_ta_load(psp, &psp->dtm_context.context); 2054 if (!ret) { 2055 psp->dtm_context.context.initialized = true; 2056 mutex_init(&psp->dtm_context.mutex); 2057 } 2058 2059 return ret; 2060 } 2061 2062 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2063 { 2064 /* 2065 * TODO: bypass the loading in sriov for now 2066 */ 2067 if (amdgpu_sriov_vf(psp->adev)) 2068 return 0; 2069 2070 if (!psp->dtm_context.context.initialized) 2071 return 0; 2072 2073 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context); 2074 } 2075 2076 static int psp_dtm_terminate(struct psp_context *psp) 2077 { 2078 int ret; 2079 2080 /* 2081 * TODO: bypass the terminate in sriov for now 2082 */ 2083 if (amdgpu_sriov_vf(psp->adev)) 2084 return 0; 2085 2086 if (!psp->dtm_context.context.initialized) 2087 return 0; 2088 2089 ret = psp_ta_unload(psp, &psp->dtm_context.context); 2090 2091 psp->dtm_context.context.initialized = false; 2092 2093 return ret; 2094 } 2095 // DTM end 2096 2097 // RAP start 2098 static int psp_rap_initialize(struct psp_context 
*psp) 2099 { 2100 int ret; 2101 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS; 2102 2103 /* 2104 * TODO: bypass the initialize in sriov for now 2105 */ 2106 if (amdgpu_sriov_vf(psp->adev)) 2107 return 0; 2108 2109 if (!psp->rap_context.context.bin_desc.size_bytes || 2110 !psp->rap_context.context.bin_desc.start_addr) { 2111 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n"); 2112 return 0; 2113 } 2114 2115 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE; 2116 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2117 2118 if (!psp->rap_context.context.mem_context.shared_buf) { 2119 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context); 2120 if (ret) 2121 return ret; 2122 } 2123 2124 ret = psp_ta_load(psp, &psp->rap_context.context); 2125 if (!ret) { 2126 psp->rap_context.context.initialized = true; 2127 mutex_init(&psp->rap_context.mutex); 2128 } else 2129 return ret; 2130 2131 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status); 2132 if (ret || status != TA_RAP_STATUS__SUCCESS) { 2133 psp_rap_terminate(psp); 2134 /* free rap shared memory */ 2135 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); 2136 2137 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n", 2138 ret, status); 2139 2140 return ret; 2141 } 2142 2143 return 0; 2144 } 2145 2146 static int psp_rap_terminate(struct psp_context *psp) 2147 { 2148 int ret; 2149 2150 if (!psp->rap_context.context.initialized) 2151 return 0; 2152 2153 ret = psp_ta_unload(psp, &psp->rap_context.context); 2154 2155 psp->rap_context.context.initialized = false; 2156 2157 return ret; 2158 } 2159 2160 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status) 2161 { 2162 struct ta_rap_shared_memory *rap_cmd; 2163 int ret = 0; 2164 2165 if (!psp->rap_context.context.initialized) 2166 return 0; 2167 2168 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE && 2169 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0) 2170 return -EINVAL; 2171 2172 mutex_lock(&psp->rap_context.mutex); 2173 2174 rap_cmd = (struct ta_rap_shared_memory *) 2175 psp->rap_context.context.mem_context.shared_buf; 2176 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory)); 2177 2178 rap_cmd->cmd_id = ta_cmd_id; 2179 rap_cmd->validation_method_id = METHOD_A; 2180 2181 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context); 2182 if (ret) 2183 goto out_unlock; 2184 2185 if (status) 2186 *status = rap_cmd->rap_status; 2187 2188 out_unlock: 2189 mutex_unlock(&psp->rap_context.mutex); 2190 2191 return ret; 2192 } 2193 // RAP end 2194 2195 /* securedisplay start */ 2196 static int psp_securedisplay_initialize(struct psp_context *psp) 2197 { 2198 int ret; 2199 struct ta_securedisplay_cmd *securedisplay_cmd; 2200 2201 /* 2202 * TODO: bypass the initialize in sriov for now 2203 */ 2204 if (amdgpu_sriov_vf(psp->adev)) 2205 return 0; 2206 2207 /* bypass securedisplay initialization if dmu is harvested */ 2208 if (!amdgpu_device_has_display_hardware(psp->adev)) 2209 return 0; 2210 2211 if (!psp->securedisplay_context.context.bin_desc.size_bytes || 2212 !psp->securedisplay_context.context.bin_desc.start_addr) { 2213 dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n"); 2214 return 0; 2215 } 2216 2217 psp->securedisplay_context.context.mem_context.shared_mem_size = 2218 PSP_SECUREDISPLAY_SHARED_MEM_SIZE; 2219 psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2220 2221 if 
(!psp->securedisplay_context.context.initialized) { 2222 ret = psp_ta_init_shared_buf(psp, 2223 &psp->securedisplay_context.context.mem_context); 2224 if (ret) 2225 return ret; 2226 } 2227 2228 ret = psp_ta_load(psp, &psp->securedisplay_context.context); 2229 if (!ret) { 2230 psp->securedisplay_context.context.initialized = true; 2231 mutex_init(&psp->securedisplay_context.mutex); 2232 } else 2233 return ret; 2234 2235 mutex_lock(&psp->securedisplay_context.mutex); 2236 2237 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, 2238 TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2239 2240 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2241 2242 mutex_unlock(&psp->securedisplay_context.mutex); 2243 2244 if (ret) { 2245 psp_securedisplay_terminate(psp); 2246 /* free securedisplay shared memory */ 2247 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); 2248 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n"); 2249 return -EINVAL; 2250 } 2251 2252 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { 2253 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); 2254 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n", 2255 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret); 2256 /* don't try again */ 2257 psp->securedisplay_context.context.bin_desc.size_bytes = 0; 2258 } 2259 2260 return 0; 2261 } 2262 2263 static int psp_securedisplay_terminate(struct psp_context *psp) 2264 { 2265 int ret; 2266 2267 /* 2268 * TODO:bypass the terminate in sriov for now 2269 */ 2270 if (amdgpu_sriov_vf(psp->adev)) 2271 return 0; 2272 2273 if (!psp->securedisplay_context.context.initialized) 2274 return 0; 2275 2276 ret = psp_ta_unload(psp, &psp->securedisplay_context.context); 2277 2278 psp->securedisplay_context.context.initialized = false; 2279 2280 return ret; 2281 } 2282 2283 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2284 { 2285 int ret; 2286 2287 if (!psp->securedisplay_context.context.initialized) 2288 return -EINVAL; 2289 2290 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA && 2291 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC && 2292 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2) 2293 return -EINVAL; 2294 2295 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context); 2296 2297 return ret; 2298 } 2299 /* SECUREDISPLAY end */ 2300 2301 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev) 2302 { 2303 struct psp_context *psp = &adev->psp; 2304 int ret = 0; 2305 2306 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL) 2307 ret = psp->funcs->wait_for_bootloader(psp); 2308 2309 return ret; 2310 } 2311 2312 bool amdgpu_psp_get_ras_capability(struct psp_context *psp) 2313 { 2314 if (psp->funcs && 2315 psp->funcs->get_ras_capability) { 2316 return psp->funcs->get_ras_capability(psp); 2317 } else { 2318 return false; 2319 } 2320 } 2321 2322 bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev) 2323 { 2324 struct psp_context *psp = &adev->psp; 2325 2326 if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) 2327 return false; 2328 2329 if (psp->funcs && psp->funcs->is_reload_needed) 2330 return psp->funcs->is_reload_needed(psp); 2331 2332 return false; 2333 } 2334 2335 static int psp_hw_start(struct psp_context *psp) 2336 { 2337 struct amdgpu_device *adev = psp->adev; 2338 int ret; 2339 2340 if (!amdgpu_sriov_vf(adev)) { 2341 if ((is_psp_fw_valid(psp->kdb)) && 2342 
(psp->funcs->bootloader_load_kdb != NULL)) { 2343 ret = psp_bootloader_load_kdb(psp); 2344 if (ret) { 2345 dev_err(adev->dev, "PSP load kdb failed!\n"); 2346 return ret; 2347 } 2348 } 2349 2350 if ((is_psp_fw_valid(psp->spl)) && 2351 (psp->funcs->bootloader_load_spl != NULL)) { 2352 ret = psp_bootloader_load_spl(psp); 2353 if (ret) { 2354 dev_err(adev->dev, "PSP load spl failed!\n"); 2355 return ret; 2356 } 2357 } 2358 2359 if ((is_psp_fw_valid(psp->sys)) && 2360 (psp->funcs->bootloader_load_sysdrv != NULL)) { 2361 ret = psp_bootloader_load_sysdrv(psp); 2362 if (ret) { 2363 dev_err(adev->dev, "PSP load sys drv failed!\n"); 2364 return ret; 2365 } 2366 } 2367 2368 if ((is_psp_fw_valid(psp->soc_drv)) && 2369 (psp->funcs->bootloader_load_soc_drv != NULL)) { 2370 ret = psp_bootloader_load_soc_drv(psp); 2371 if (ret) { 2372 dev_err(adev->dev, "PSP load soc drv failed!\n"); 2373 return ret; 2374 } 2375 } 2376 2377 if ((is_psp_fw_valid(psp->intf_drv)) && 2378 (psp->funcs->bootloader_load_intf_drv != NULL)) { 2379 ret = psp_bootloader_load_intf_drv(psp); 2380 if (ret) { 2381 dev_err(adev->dev, "PSP load intf drv failed!\n"); 2382 return ret; 2383 } 2384 } 2385 2386 if ((is_psp_fw_valid(psp->dbg_drv)) && 2387 (psp->funcs->bootloader_load_dbg_drv != NULL)) { 2388 ret = psp_bootloader_load_dbg_drv(psp); 2389 if (ret) { 2390 dev_err(adev->dev, "PSP load dbg drv failed!\n"); 2391 return ret; 2392 } 2393 } 2394 2395 if ((is_psp_fw_valid(psp->ras_drv)) && 2396 (psp->funcs->bootloader_load_ras_drv != NULL)) { 2397 ret = psp_bootloader_load_ras_drv(psp); 2398 if (ret) { 2399 dev_err(adev->dev, "PSP load ras_drv failed!\n"); 2400 return ret; 2401 } 2402 } 2403 2404 if ((is_psp_fw_valid(psp->ipkeymgr_drv)) && 2405 (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) { 2406 ret = psp_bootloader_load_ipkeymgr_drv(psp); 2407 if (ret) { 2408 dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n"); 2409 return ret; 2410 } 2411 } 2412 2413 if ((is_psp_fw_valid(psp->spdm_drv)) && 2414 (psp->funcs->bootloader_load_spdm_drv != NULL)) { 2415 ret = psp_bootloader_load_spdm_drv(psp); 2416 if (ret) { 2417 dev_err(adev->dev, "PSP load spdm_drv failed!\n"); 2418 return ret; 2419 } 2420 } 2421 2422 if ((is_psp_fw_valid(psp->sos)) && 2423 (psp->funcs->bootloader_load_sos != NULL)) { 2424 ret = psp_bootloader_load_sos(psp); 2425 if (ret) { 2426 dev_err(adev->dev, "PSP load sos failed!\n"); 2427 return ret; 2428 } 2429 } 2430 } 2431 2432 ret = psp_ring_create(psp, PSP_RING_TYPE__KM); 2433 if (ret) { 2434 dev_err(adev->dev, "PSP create ring failed!\n"); 2435 return ret; 2436 } 2437 2438 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) 2439 goto skip_pin_bo; 2440 2441 if (!psp->boot_time_tmr || psp->autoload_supported) { 2442 ret = psp_tmr_init(psp); 2443 if (ret) { 2444 dev_err(adev->dev, "PSP tmr init failed!\n"); 2445 return ret; 2446 } 2447 } 2448 2449 skip_pin_bo: 2450 /* 2451 * For ASICs with DF Cstate management centralized 2452 * to PMFW, TMR setup should be performed after PMFW 2453 * loaded and before other non-psp firmware loaded. 
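	 * In this function that ordering means psp_load_smu_fw() is issued
	 * just below, then psp_tmr_load(), before the caller goes on to load
	 * the remaining firmware through psp_load_non_psp_fw().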
2454 */ 2455 if (psp->pmfw_centralized_cstate_management) { 2456 ret = psp_load_smu_fw(psp); 2457 if (ret) 2458 return ret; 2459 } 2460 2461 if (!psp->boot_time_tmr || !psp->autoload_supported) { 2462 ret = psp_tmr_load(psp); 2463 if (ret) { 2464 dev_err(adev->dev, "PSP load tmr failed!\n"); 2465 return ret; 2466 } 2467 } 2468 2469 return 0; 2470 } 2471 2472 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, 2473 enum psp_gfx_fw_type *type) 2474 { 2475 switch (ucode->ucode_id) { 2476 case AMDGPU_UCODE_ID_CAP: 2477 *type = GFX_FW_TYPE_CAP; 2478 break; 2479 case AMDGPU_UCODE_ID_SDMA0: 2480 *type = GFX_FW_TYPE_SDMA0; 2481 break; 2482 case AMDGPU_UCODE_ID_SDMA1: 2483 *type = GFX_FW_TYPE_SDMA1; 2484 break; 2485 case AMDGPU_UCODE_ID_SDMA2: 2486 *type = GFX_FW_TYPE_SDMA2; 2487 break; 2488 case AMDGPU_UCODE_ID_SDMA3: 2489 *type = GFX_FW_TYPE_SDMA3; 2490 break; 2491 case AMDGPU_UCODE_ID_SDMA4: 2492 *type = GFX_FW_TYPE_SDMA4; 2493 break; 2494 case AMDGPU_UCODE_ID_SDMA5: 2495 *type = GFX_FW_TYPE_SDMA5; 2496 break; 2497 case AMDGPU_UCODE_ID_SDMA6: 2498 *type = GFX_FW_TYPE_SDMA6; 2499 break; 2500 case AMDGPU_UCODE_ID_SDMA7: 2501 *type = GFX_FW_TYPE_SDMA7; 2502 break; 2503 case AMDGPU_UCODE_ID_CP_MES: 2504 *type = GFX_FW_TYPE_CP_MES; 2505 break; 2506 case AMDGPU_UCODE_ID_CP_MES_DATA: 2507 *type = GFX_FW_TYPE_MES_STACK; 2508 break; 2509 case AMDGPU_UCODE_ID_CP_MES1: 2510 *type = GFX_FW_TYPE_CP_MES_KIQ; 2511 break; 2512 case AMDGPU_UCODE_ID_CP_MES1_DATA: 2513 *type = GFX_FW_TYPE_MES_KIQ_STACK; 2514 break; 2515 case AMDGPU_UCODE_ID_CP_CE: 2516 *type = GFX_FW_TYPE_CP_CE; 2517 break; 2518 case AMDGPU_UCODE_ID_CP_PFP: 2519 *type = GFX_FW_TYPE_CP_PFP; 2520 break; 2521 case AMDGPU_UCODE_ID_CP_ME: 2522 *type = GFX_FW_TYPE_CP_ME; 2523 break; 2524 case AMDGPU_UCODE_ID_CP_MEC1: 2525 *type = GFX_FW_TYPE_CP_MEC; 2526 break; 2527 case AMDGPU_UCODE_ID_CP_MEC1_JT: 2528 *type = GFX_FW_TYPE_CP_MEC_ME1; 2529 break; 2530 case AMDGPU_UCODE_ID_CP_MEC2: 2531 *type = GFX_FW_TYPE_CP_MEC; 2532 break; 2533 case AMDGPU_UCODE_ID_CP_MEC2_JT: 2534 *type = GFX_FW_TYPE_CP_MEC_ME2; 2535 break; 2536 case AMDGPU_UCODE_ID_RLC_P: 2537 *type = GFX_FW_TYPE_RLC_P; 2538 break; 2539 case AMDGPU_UCODE_ID_RLC_V: 2540 *type = GFX_FW_TYPE_RLC_V; 2541 break; 2542 case AMDGPU_UCODE_ID_RLC_G: 2543 *type = GFX_FW_TYPE_RLC_G; 2544 break; 2545 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: 2546 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL; 2547 break; 2548 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: 2549 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM; 2550 break; 2551 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: 2552 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; 2553 break; 2554 case AMDGPU_UCODE_ID_RLC_IRAM: 2555 *type = GFX_FW_TYPE_RLC_IRAM; 2556 break; 2557 case AMDGPU_UCODE_ID_RLC_DRAM: 2558 *type = GFX_FW_TYPE_RLC_DRAM_BOOT; 2559 break; 2560 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS: 2561 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS; 2562 break; 2563 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS: 2564 *type = GFX_FW_TYPE_SE0_TAP_DELAYS; 2565 break; 2566 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS: 2567 *type = GFX_FW_TYPE_SE1_TAP_DELAYS; 2568 break; 2569 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS: 2570 *type = GFX_FW_TYPE_SE2_TAP_DELAYS; 2571 break; 2572 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS: 2573 *type = GFX_FW_TYPE_SE3_TAP_DELAYS; 2574 break; 2575 case AMDGPU_UCODE_ID_SMC: 2576 *type = GFX_FW_TYPE_SMU; 2577 break; 2578 case AMDGPU_UCODE_ID_PPTABLE: 2579 *type = GFX_FW_TYPE_PPTABLE; 2580 break; 2581 case AMDGPU_UCODE_ID_UVD: 2582 *type = GFX_FW_TYPE_UVD; 2583 break; 2584 
case AMDGPU_UCODE_ID_UVD1: 2585 *type = GFX_FW_TYPE_UVD1; 2586 break; 2587 case AMDGPU_UCODE_ID_VCE: 2588 *type = GFX_FW_TYPE_VCE; 2589 break; 2590 case AMDGPU_UCODE_ID_VCN: 2591 *type = GFX_FW_TYPE_VCN; 2592 break; 2593 case AMDGPU_UCODE_ID_VCN1: 2594 *type = GFX_FW_TYPE_VCN1; 2595 break; 2596 case AMDGPU_UCODE_ID_DMCU_ERAM: 2597 *type = GFX_FW_TYPE_DMCU_ERAM; 2598 break; 2599 case AMDGPU_UCODE_ID_DMCU_INTV: 2600 *type = GFX_FW_TYPE_DMCU_ISR; 2601 break; 2602 case AMDGPU_UCODE_ID_VCN0_RAM: 2603 *type = GFX_FW_TYPE_VCN0_RAM; 2604 break; 2605 case AMDGPU_UCODE_ID_VCN1_RAM: 2606 *type = GFX_FW_TYPE_VCN1_RAM; 2607 break; 2608 case AMDGPU_UCODE_ID_DMCUB: 2609 *type = GFX_FW_TYPE_DMUB; 2610 break; 2611 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0: 2612 case AMDGPU_UCODE_ID_SDMA_RS64: 2613 *type = GFX_FW_TYPE_SDMA_UCODE_TH0; 2614 break; 2615 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1: 2616 *type = GFX_FW_TYPE_SDMA_UCODE_TH1; 2617 break; 2618 case AMDGPU_UCODE_ID_IMU_I: 2619 *type = GFX_FW_TYPE_IMU_I; 2620 break; 2621 case AMDGPU_UCODE_ID_IMU_D: 2622 *type = GFX_FW_TYPE_IMU_D; 2623 break; 2624 case AMDGPU_UCODE_ID_CP_RS64_PFP: 2625 *type = GFX_FW_TYPE_RS64_PFP; 2626 break; 2627 case AMDGPU_UCODE_ID_CP_RS64_ME: 2628 *type = GFX_FW_TYPE_RS64_ME; 2629 break; 2630 case AMDGPU_UCODE_ID_CP_RS64_MEC: 2631 *type = GFX_FW_TYPE_RS64_MEC; 2632 break; 2633 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK: 2634 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK; 2635 break; 2636 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK: 2637 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK; 2638 break; 2639 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK: 2640 *type = GFX_FW_TYPE_RS64_ME_P0_STACK; 2641 break; 2642 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK: 2643 *type = GFX_FW_TYPE_RS64_ME_P1_STACK; 2644 break; 2645 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK: 2646 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK; 2647 break; 2648 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK: 2649 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK; 2650 break; 2651 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK: 2652 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK; 2653 break; 2654 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK: 2655 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK; 2656 break; 2657 case AMDGPU_UCODE_ID_VPE_CTX: 2658 *type = GFX_FW_TYPE_VPEC_FW1; 2659 break; 2660 case AMDGPU_UCODE_ID_VPE_CTL: 2661 *type = GFX_FW_TYPE_VPEC_FW2; 2662 break; 2663 case AMDGPU_UCODE_ID_VPE: 2664 *type = GFX_FW_TYPE_VPE; 2665 break; 2666 case AMDGPU_UCODE_ID_UMSCH_MM_UCODE: 2667 *type = GFX_FW_TYPE_UMSCH_UCODE; 2668 break; 2669 case AMDGPU_UCODE_ID_UMSCH_MM_DATA: 2670 *type = GFX_FW_TYPE_UMSCH_DATA; 2671 break; 2672 case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER: 2673 *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER; 2674 break; 2675 case AMDGPU_UCODE_ID_P2S_TABLE: 2676 *type = GFX_FW_TYPE_P2S_TABLE; 2677 break; 2678 case AMDGPU_UCODE_ID_JPEG_RAM: 2679 *type = GFX_FW_TYPE_JPEG_RAM; 2680 break; 2681 case AMDGPU_UCODE_ID_ISP: 2682 *type = GFX_FW_TYPE_ISP; 2683 break; 2684 case AMDGPU_UCODE_ID_MAXIMUM: 2685 default: 2686 return -EINVAL; 2687 } 2688 2689 return 0; 2690 } 2691 2692 static void psp_print_fw_hdr(struct psp_context *psp, 2693 struct amdgpu_firmware_info *ucode) 2694 { 2695 struct amdgpu_device *adev = psp->adev; 2696 struct common_firmware_header *hdr; 2697 2698 switch (ucode->ucode_id) { 2699 case AMDGPU_UCODE_ID_SDMA0: 2700 case AMDGPU_UCODE_ID_SDMA1: 2701 case AMDGPU_UCODE_ID_SDMA2: 2702 case AMDGPU_UCODE_ID_SDMA3: 2703 case AMDGPU_UCODE_ID_SDMA4: 2704 case AMDGPU_UCODE_ID_SDMA5: 2705 case AMDGPU_UCODE_ID_SDMA6: 2706 case AMDGPU_UCODE_ID_SDMA7: 2707 hdr = (struct 
common_firmware_header *) 2708 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; 2709 amdgpu_ucode_print_sdma_hdr(hdr); 2710 break; 2711 case AMDGPU_UCODE_ID_CP_CE: 2712 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data; 2713 amdgpu_ucode_print_gfx_hdr(hdr); 2714 break; 2715 case AMDGPU_UCODE_ID_CP_PFP: 2716 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data; 2717 amdgpu_ucode_print_gfx_hdr(hdr); 2718 break; 2719 case AMDGPU_UCODE_ID_CP_ME: 2720 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data; 2721 amdgpu_ucode_print_gfx_hdr(hdr); 2722 break; 2723 case AMDGPU_UCODE_ID_CP_MEC1: 2724 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data; 2725 amdgpu_ucode_print_gfx_hdr(hdr); 2726 break; 2727 case AMDGPU_UCODE_ID_RLC_G: 2728 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data; 2729 amdgpu_ucode_print_rlc_hdr(hdr); 2730 break; 2731 case AMDGPU_UCODE_ID_SMC: 2732 hdr = (struct common_firmware_header *)adev->pm.fw->data; 2733 amdgpu_ucode_print_smc_hdr(hdr); 2734 break; 2735 default: 2736 break; 2737 } 2738 } 2739 2740 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp, 2741 struct amdgpu_firmware_info *ucode, 2742 struct psp_gfx_cmd_resp *cmd) 2743 { 2744 int ret; 2745 uint64_t fw_mem_mc_addr = ucode->mc_addr; 2746 2747 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 2748 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); 2749 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); 2750 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; 2751 2752 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); 2753 if (ret) 2754 dev_err(psp->adev->dev, "Unknown firmware type\n"); 2755 2756 return ret; 2757 } 2758 2759 int psp_execute_ip_fw_load(struct psp_context *psp, 2760 struct amdgpu_firmware_info *ucode) 2761 { 2762 int ret = 0; 2763 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 2764 2765 ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd); 2766 if (!ret) { 2767 ret = psp_cmd_submit_buf(psp, ucode, cmd, 2768 psp->fence_buf_mc_addr); 2769 } 2770 2771 release_psp_cmd_buf(psp); 2772 2773 return ret; 2774 } 2775 2776 static int psp_load_p2s_table(struct psp_context *psp) 2777 { 2778 int ret; 2779 struct amdgpu_device *adev = psp->adev; 2780 struct amdgpu_firmware_info *ucode = 2781 &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE]; 2782 2783 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || 2784 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO))) 2785 return 0; 2786 2787 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || 2788 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) { 2789 uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D : 2790 0x0036003C; 2791 if (psp->sos.fw_version < supp_vers) 2792 return 0; 2793 } 2794 2795 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2796 return 0; 2797 2798 ret = psp_execute_ip_fw_load(psp, ucode); 2799 2800 return ret; 2801 } 2802 2803 static int psp_load_smu_fw(struct psp_context *psp) 2804 { 2805 int ret; 2806 struct amdgpu_device *adev = psp->adev; 2807 struct amdgpu_firmware_info *ucode = 2808 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 2809 struct amdgpu_ras *ras = psp->ras_context.ras; 2810 2811 /* 2812 * Skip SMU FW reloading in case of using BACO for runpm only, 2813 * as SMU is always alive. 
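	 * (BAMACO is treated the same way here, hence both runpm modes are
	 * checked below.)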
2814 */ 2815 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || 2816 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO))) 2817 return 0; 2818 2819 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2820 return 0; 2821 2822 if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled && 2823 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) || 2824 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) { 2825 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); 2826 if (ret) 2827 dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n"); 2828 } 2829 2830 ret = psp_execute_ip_fw_load(psp, ucode); 2831 2832 if (ret) 2833 dev_err(adev->dev, "PSP load smu failed!\n"); 2834 2835 return ret; 2836 } 2837 2838 static bool fw_load_skip_check(struct psp_context *psp, 2839 struct amdgpu_firmware_info *ucode) 2840 { 2841 if (!ucode->fw || !ucode->ucode_size) 2842 return true; 2843 2844 if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE) 2845 return true; 2846 2847 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 2848 (psp_smu_reload_quirk(psp) || 2849 psp->autoload_supported || 2850 psp->pmfw_centralized_cstate_management)) 2851 return true; 2852 2853 if (amdgpu_sriov_vf(psp->adev) && 2854 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id)) 2855 return true; 2856 2857 if (psp->autoload_supported && 2858 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || 2859 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) 2860 /* skip mec JT when autoload is enabled */ 2861 return true; 2862 2863 return false; 2864 } 2865 2866 int psp_load_fw_list(struct psp_context *psp, 2867 struct amdgpu_firmware_info **ucode_list, int ucode_count) 2868 { 2869 int ret = 0, i; 2870 struct amdgpu_firmware_info *ucode; 2871 2872 for (i = 0; i < ucode_count; ++i) { 2873 ucode = ucode_list[i]; 2874 psp_print_fw_hdr(psp, ucode); 2875 ret = psp_execute_ip_fw_load(psp, ucode); 2876 if (ret) 2877 return ret; 2878 } 2879 return ret; 2880 } 2881 2882 static int psp_load_non_psp_fw(struct psp_context *psp) 2883 { 2884 int i, ret; 2885 struct amdgpu_firmware_info *ucode; 2886 struct amdgpu_device *adev = psp->adev; 2887 2888 if (psp->autoload_supported && 2889 !psp->pmfw_centralized_cstate_management) { 2890 ret = psp_load_smu_fw(psp); 2891 if (ret) 2892 return ret; 2893 } 2894 2895 /* Load P2S table first if it's available */ 2896 psp_load_p2s_table(psp); 2897 2898 for (i = 0; i < adev->firmware.max_ucodes; i++) { 2899 ucode = &adev->firmware.ucode[i]; 2900 2901 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 2902 !fw_load_skip_check(psp, ucode)) { 2903 ret = psp_load_smu_fw(psp); 2904 if (ret) 2905 return ret; 2906 continue; 2907 } 2908 2909 if (fw_load_skip_check(psp, ucode)) 2910 continue; 2911 2912 if (psp->autoload_supported && 2913 (amdgpu_ip_version(adev, MP0_HWIP, 0) == 2914 IP_VERSION(11, 0, 7) || 2915 amdgpu_ip_version(adev, MP0_HWIP, 0) == 2916 IP_VERSION(11, 0, 11) || 2917 amdgpu_ip_version(adev, MP0_HWIP, 0) == 2918 IP_VERSION(11, 0, 12)) && 2919 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 || 2920 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 || 2921 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3)) 2922 /* PSP only receive one SDMA fw for sienna_cichlid, 2923 * as all four sdma fw are same 2924 */ 2925 continue; 2926 2927 psp_print_fw_hdr(psp, ucode); 2928 2929 ret = psp_execute_ip_fw_load(psp, ucode); 2930 if (ret) 2931 return ret; 2932 2933 /* Start rlc autoload after psp received all the gfx firmware */ 2934 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ? 
		    adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
			ret = psp_rlc_autoload_start(psp);
			if (ret) {
				dev_err(adev->dev, "Failed to start rlc autoload\n");
				return ret;
			}
		}
	}

	return 0;
}

static int psp_load_fw(struct amdgpu_device *adev)
{
	int ret;
	struct psp_context *psp = &adev->psp;

	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
		/* should not destroy ring, only stop */
		psp_ring_stop(psp, PSP_RING_TYPE__KM);
	} else {
		memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);

		ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
		if (ret) {
			dev_err(adev->dev, "PSP ring init failed!\n");
			goto failed;
		}
	}

	ret = psp_hw_start(psp);
	if (ret)
		goto failed;

	ret = psp_load_non_psp_fw(psp);
	if (ret)
		goto failed1;

	ret = psp_asd_initialize(psp);
	if (ret) {
		dev_err(adev->dev, "PSP load asd failed!\n");
		goto failed1;
	}

	ret = psp_rl_load(adev);
	if (ret) {
		dev_err(adev->dev, "PSP load RL failed!\n");
		goto failed1;
	}

	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
		if (adev->gmc.xgmi.num_physical_nodes > 1) {
			ret = psp_xgmi_initialize(psp, false, true);
			/* Warn on XGMI session initialization failure
			 * instead of stopping driver initialization
			 */
			if (ret)
				dev_err(psp->adev->dev,
					"XGMI: Failed to initialize XGMI session\n");
		}
	}

	if (psp->ta_fw) {
		ret = psp_ras_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAS: Failed to initialize RAS\n");

		ret = psp_hdcp_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"HDCP: Failed to initialize HDCP\n");

		ret = psp_dtm_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"DTM: Failed to initialize DTM\n");

		ret = psp_rap_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAP: Failed to initialize RAP\n");

		ret = psp_securedisplay_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
	}

	return 0;

failed1:
	psp_free_shared_bufs(psp);
failed:
	/*
	 * all cleanup jobs (xgmi terminate, ras terminate,
	 * ring destroy, cmd/fence/fw buffers destroy,
	 * psp->cmd destroy) are delayed to psp_hw_fini
	 */
	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
	return ret;
}

static int psp_hw_init(struct amdgpu_ip_block *ip_block)
{
	int ret;
	struct amdgpu_device *adev = ip_block->adev;

	mutex_lock(&adev->firmware.mutex);

	ret = amdgpu_ucode_init_bo(adev);
	if (ret)
		goto failed;

	ret = psp_load_fw(adev);
	if (ret) {
		dev_err(adev->dev, "PSP firmware loading failed\n");
		goto failed;
	}

	mutex_unlock(&adev->firmware.mutex);
	return 0;

failed:
	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
	mutex_unlock(&adev->firmware.mutex);
	return -EINVAL;
}

static int psp_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct psp_context *psp = &adev->psp;

	if (psp->ta_fw) {
		psp_ras_terminate(psp);
		psp_securedisplay_terminate(psp);
		psp_rap_terminate(psp);
		psp_dtm_terminate(psp);
		psp_hdcp_terminate(psp);

		if (adev->gmc.xgmi.num_physical_nodes > 1)
			psp_xgmi_terminate(psp);
	}

	psp_asd_terminate(psp);
	psp_tmr_terminate(psp);

	psp_ring_destroy(psp, PSP_RING_TYPE__KM);

	return 0;
}

static int psp_suspend(struct amdgpu_ip_block *ip_block)
{
	int ret = 0;
	struct amdgpu_device *adev = ip_block->adev;
	struct psp_context *psp = &adev->psp;

	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
	    psp->xgmi_context.context.initialized) {
		ret = psp_xgmi_terminate(psp);
		if (ret) {
			dev_err(adev->dev, "Failed to terminate xgmi ta\n");
			goto out;
		}
	}

	if (psp->ta_fw) {
		ret = psp_ras_terminate(psp);
		if (ret) {
			dev_err(adev->dev, "Failed to terminate ras ta\n");
			goto out;
		}
		ret = psp_hdcp_terminate(psp);
		if (ret) {
			dev_err(adev->dev, "Failed to terminate hdcp ta\n");
			goto out;
		}
		ret = psp_dtm_terminate(psp);
		if (ret) {
			dev_err(adev->dev, "Failed to terminate dtm ta\n");
			goto out;
		}
		ret = psp_rap_terminate(psp);
		if (ret) {
			dev_err(adev->dev, "Failed to terminate rap ta\n");
			goto out;
		}
		ret = psp_securedisplay_terminate(psp);
		if (ret) {
			dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
			goto out;
		}
	}

	ret = psp_asd_terminate(psp);
	if (ret) {
		dev_err(adev->dev, "Failed to terminate asd\n");
		goto out;
	}

	ret = psp_tmr_terminate(psp);
	if (ret) {
		dev_err(adev->dev, "Failed to terminate tmr\n");
		goto out;
	}

	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
	if (ret)
		dev_err(adev->dev, "PSP ring stop failed\n");

out:
	return ret;
}

static int psp_resume(struct amdgpu_ip_block *ip_block)
{
	int ret;
	struct amdgpu_device *adev = ip_block->adev;
	struct psp_context *psp = &adev->psp;

	dev_info(adev->dev, "PSP is resuming...\n");

	if (psp->mem_train_ctx.enable_mem_training) {
		ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
		if (ret) {
			dev_err(adev->dev, "Failed to process memory training!\n");
			return ret;
		}
	}

	mutex_lock(&adev->firmware.mutex);

	ret = amdgpu_ucode_init_bo(adev);
	if (ret)
		goto failed;

	ret = psp_hw_start(psp);
	if (ret)
		goto failed;

	ret = psp_load_non_psp_fw(psp);
	if (ret)
		goto failed;

	ret = psp_asd_initialize(psp);
	if (ret) {
		dev_err(adev->dev, "PSP load asd failed!\n");
		goto failed;
	}

	ret = psp_rl_load(adev);
	if (ret) {
		dev_err(adev->dev, "PSP load RL failed!\n");
		goto failed;
	}

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		ret = psp_xgmi_initialize(psp, false, true);
		/* Warn on XGMI session initialization failure
		 * instead of stopping driver initialization
		 */
		if (ret)
			dev_err(psp->adev->dev,
				"XGMI: Failed to initialize XGMI session\n");
	}

	if (psp->ta_fw) {
		ret = psp_ras_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAS: Failed to initialize RAS\n");

		ret = psp_hdcp_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"HDCP: Failed to initialize HDCP\n");

		ret = psp_dtm_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"DTM: Failed to initialize DTM\n");

		ret = psp_rap_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAP: Failed to initialize RAP\n");
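
		/* Note: as in psp_load_fw(), failures to initialize these
		 * optional TAs are only logged and do not abort the resume.
		 */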
3224 ret = psp_securedisplay_initialize(psp); 3225 if (ret) 3226 dev_err(psp->adev->dev, 3227 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 3228 } 3229 3230 mutex_unlock(&adev->firmware.mutex); 3231 3232 return 0; 3233 3234 failed: 3235 dev_err(adev->dev, "PSP resume failed\n"); 3236 mutex_unlock(&adev->firmware.mutex); 3237 return ret; 3238 } 3239 3240 int psp_gpu_reset(struct amdgpu_device *adev) 3241 { 3242 int ret; 3243 3244 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 3245 return 0; 3246 3247 mutex_lock(&adev->psp.mutex); 3248 ret = psp_mode1_reset(&adev->psp); 3249 mutex_unlock(&adev->psp.mutex); 3250 3251 return ret; 3252 } 3253 3254 int psp_rlc_autoload_start(struct psp_context *psp) 3255 { 3256 int ret; 3257 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 3258 3259 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC; 3260 3261 ret = psp_cmd_submit_buf(psp, NULL, cmd, 3262 psp->fence_buf_mc_addr); 3263 3264 release_psp_cmd_buf(psp); 3265 3266 return ret; 3267 } 3268 3269 int psp_ring_cmd_submit(struct psp_context *psp, 3270 uint64_t cmd_buf_mc_addr, 3271 uint64_t fence_mc_addr, 3272 int index) 3273 { 3274 unsigned int psp_write_ptr_reg = 0; 3275 struct psp_gfx_rb_frame *write_frame; 3276 struct psp_ring *ring = &psp->km_ring; 3277 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; 3278 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + 3279 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; 3280 struct amdgpu_device *adev = psp->adev; 3281 uint32_t ring_size_dw = ring->ring_size / 4; 3282 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; 3283 3284 /* KM (GPCOM) prepare write pointer */ 3285 psp_write_ptr_reg = psp_ring_get_wptr(psp); 3286 3287 /* Update KM RB frame pointer to new frame */ 3288 /* write_frame ptr increments by size of rb_frame in bytes */ 3289 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ 3290 if ((psp_write_ptr_reg % ring_size_dw) == 0) 3291 write_frame = ring_buffer_start; 3292 else 3293 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); 3294 /* Check invalid write_frame ptr address */ 3295 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { 3296 dev_err(adev->dev, 3297 "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", 3298 ring_buffer_start, ring_buffer_end, write_frame); 3299 dev_err(adev->dev, 3300 "write_frame is pointing to address out of bounds\n"); 3301 return -EINVAL; 3302 } 3303 3304 /* Initialize KM RB frame */ 3305 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); 3306 3307 /* Update KM RB frame */ 3308 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); 3309 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); 3310 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); 3311 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); 3312 write_frame->fence_value = index; 3313 amdgpu_device_flush_hdp(adev, NULL); 3314 3315 /* Update the write Pointer in DWORDs */ 3316 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; 3317 psp_ring_set_wptr(psp, psp_write_ptr_reg); 3318 return 0; 3319 } 3320 3321 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name) 3322 { 3323 struct amdgpu_device *adev = psp->adev; 3324 const struct psp_firmware_header_v1_0 *asd_hdr; 3325 int err = 0; 3326 3327 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED, 3328 "amdgpu/%s_asd.bin", chip_name); 3329 if (err) 3330 goto out; 3331 3332 asd_hdr = 
(const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; 3333 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version); 3334 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version); 3335 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes); 3336 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr + 3337 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); 3338 return 0; 3339 out: 3340 amdgpu_ucode_release(&adev->psp.asd_fw); 3341 return err; 3342 } 3343 3344 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name) 3345 { 3346 struct amdgpu_device *adev = psp->adev; 3347 const struct psp_firmware_header_v1_0 *toc_hdr; 3348 int err = 0; 3349 3350 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED, 3351 "amdgpu/%s_toc.bin", chip_name); 3352 if (err) 3353 goto out; 3354 3355 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; 3356 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); 3357 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); 3358 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); 3359 adev->psp.toc.start_addr = (uint8_t *)toc_hdr + 3360 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); 3361 return 0; 3362 out: 3363 amdgpu_ucode_release(&adev->psp.toc_fw); 3364 return err; 3365 } 3366 3367 static int parse_sos_bin_descriptor(struct psp_context *psp, 3368 const struct psp_fw_bin_desc *desc, 3369 const struct psp_firmware_header_v2_0 *sos_hdr) 3370 { 3371 uint8_t *ucode_start_addr = NULL; 3372 3373 if (!psp || !desc || !sos_hdr) 3374 return -EINVAL; 3375 3376 ucode_start_addr = (uint8_t *)sos_hdr + 3377 le32_to_cpu(desc->offset_bytes) + 3378 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3379 3380 switch (desc->fw_type) { 3381 case PSP_FW_TYPE_PSP_SOS: 3382 psp->sos.fw_version = le32_to_cpu(desc->fw_version); 3383 psp->sos.feature_version = le32_to_cpu(desc->fw_version); 3384 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes); 3385 psp->sos.start_addr = ucode_start_addr; 3386 break; 3387 case PSP_FW_TYPE_PSP_SYS_DRV: 3388 psp->sys.fw_version = le32_to_cpu(desc->fw_version); 3389 psp->sys.feature_version = le32_to_cpu(desc->fw_version); 3390 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes); 3391 psp->sys.start_addr = ucode_start_addr; 3392 break; 3393 case PSP_FW_TYPE_PSP_KDB: 3394 psp->kdb.fw_version = le32_to_cpu(desc->fw_version); 3395 psp->kdb.feature_version = le32_to_cpu(desc->fw_version); 3396 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes); 3397 psp->kdb.start_addr = ucode_start_addr; 3398 break; 3399 case PSP_FW_TYPE_PSP_TOC: 3400 psp->toc.fw_version = le32_to_cpu(desc->fw_version); 3401 psp->toc.feature_version = le32_to_cpu(desc->fw_version); 3402 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes); 3403 psp->toc.start_addr = ucode_start_addr; 3404 break; 3405 case PSP_FW_TYPE_PSP_SPL: 3406 psp->spl.fw_version = le32_to_cpu(desc->fw_version); 3407 psp->spl.feature_version = le32_to_cpu(desc->fw_version); 3408 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes); 3409 psp->spl.start_addr = ucode_start_addr; 3410 break; 3411 case PSP_FW_TYPE_PSP_RL: 3412 psp->rl.fw_version = le32_to_cpu(desc->fw_version); 3413 psp->rl.feature_version = le32_to_cpu(desc->fw_version); 3414 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes); 3415 psp->rl.start_addr = ucode_start_addr; 3416 break; 3417 case PSP_FW_TYPE_PSP_SOC_DRV: 3418 
psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version); 3419 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version); 3420 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3421 psp->soc_drv.start_addr = ucode_start_addr; 3422 break; 3423 case PSP_FW_TYPE_PSP_INTF_DRV: 3424 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version); 3425 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version); 3426 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3427 psp->intf_drv.start_addr = ucode_start_addr; 3428 break; 3429 case PSP_FW_TYPE_PSP_DBG_DRV: 3430 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version); 3431 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version); 3432 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3433 psp->dbg_drv.start_addr = ucode_start_addr; 3434 break; 3435 case PSP_FW_TYPE_PSP_RAS_DRV: 3436 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version); 3437 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version); 3438 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3439 psp->ras_drv.start_addr = ucode_start_addr; 3440 break; 3441 case PSP_FW_TYPE_PSP_IPKEYMGR_DRV: 3442 psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version); 3443 psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version); 3444 psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3445 psp->ipkeymgr_drv.start_addr = ucode_start_addr; 3446 break; 3447 case PSP_FW_TYPE_PSP_SPDM_DRV: 3448 psp->spdm_drv.fw_version = le32_to_cpu(desc->fw_version); 3449 psp->spdm_drv.feature_version = le32_to_cpu(desc->fw_version); 3450 psp->spdm_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3451 psp->spdm_drv.start_addr = ucode_start_addr; 3452 break; 3453 default: 3454 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type); 3455 break; 3456 } 3457 3458 return 0; 3459 } 3460 3461 static int psp_init_sos_base_fw(struct amdgpu_device *adev) 3462 { 3463 const struct psp_firmware_header_v1_0 *sos_hdr; 3464 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3465 uint8_t *ucode_array_start_addr; 3466 3467 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3468 ucode_array_start_addr = (uint8_t *)sos_hdr + 3469 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3470 3471 if (adev->gmc.xgmi.connected_to_cpu || 3472 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) { 3473 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version); 3474 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version); 3475 3476 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes); 3477 adev->psp.sys.start_addr = ucode_array_start_addr; 3478 3479 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes); 3480 adev->psp.sos.start_addr = ucode_array_start_addr + 3481 le32_to_cpu(sos_hdr->sos.offset_bytes); 3482 } else { 3483 /* Load alternate PSP SOS FW */ 3484 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3485 3486 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3487 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3488 3489 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes); 3490 adev->psp.sys.start_addr = ucode_array_start_addr + 3491 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes); 3492 3493 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes); 3494 adev->psp.sos.start_addr = ucode_array_start_addr + 3495 
le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes); 3496 } 3497 3498 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) { 3499 dev_warn(adev->dev, "PSP SOS FW not available"); 3500 return -EINVAL; 3501 } 3502 3503 return 0; 3504 } 3505 3506 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) 3507 { 3508 struct amdgpu_device *adev = psp->adev; 3509 const struct psp_firmware_header_v1_0 *sos_hdr; 3510 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1; 3511 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; 3512 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3513 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0; 3514 const struct psp_firmware_header_v2_1 *sos_hdr_v2_1; 3515 int fw_index, fw_bin_count, start_index = 0; 3516 const struct psp_fw_bin_desc *fw_bin; 3517 uint8_t *ucode_array_start_addr; 3518 int err = 0; 3519 3520 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED, 3521 "amdgpu/%s_sos.bin", chip_name); 3522 if (err) 3523 goto out; 3524 3525 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3526 ucode_array_start_addr = (uint8_t *)sos_hdr + 3527 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3528 amdgpu_ucode_print_psp_hdr(&sos_hdr->header); 3529 3530 switch (sos_hdr->header.header_version_major) { 3531 case 1: 3532 err = psp_init_sos_base_fw(adev); 3533 if (err) 3534 goto out; 3535 3536 if (sos_hdr->header.header_version_minor == 1) { 3537 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; 3538 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes); 3539 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3540 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes); 3541 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes); 3542 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3543 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes); 3544 } 3545 if (sos_hdr->header.header_version_minor == 2) { 3546 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; 3547 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes); 3548 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3549 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes); 3550 } 3551 if (sos_hdr->header.header_version_minor == 3) { 3552 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3553 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes); 3554 adev->psp.toc.start_addr = ucode_array_start_addr + 3555 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes); 3556 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes); 3557 adev->psp.kdb.start_addr = ucode_array_start_addr + 3558 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes); 3559 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes); 3560 adev->psp.spl.start_addr = ucode_array_start_addr + 3561 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes); 3562 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes); 3563 adev->psp.rl.start_addr = ucode_array_start_addr + 3564 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes); 3565 } 3566 break; 3567 case 2: 3568 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data; 3569 3570 fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); 3571 3572 if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) { 3573 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n"); 3574 err = -EINVAL; 3575 goto out; 3576 } 3577 3578 if 
(sos_hdr_v2_0->header.header_version_minor == 1) { 3579 sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data; 3580 3581 fw_bin = sos_hdr_v2_1->psp_fw_bin; 3582 3583 if (psp_is_aux_sos_load_required(psp)) 3584 start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index); 3585 else 3586 fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index); 3587 3588 } else { 3589 fw_bin = sos_hdr_v2_0->psp_fw_bin; 3590 } 3591 3592 for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) { 3593 err = parse_sos_bin_descriptor(psp, fw_bin + fw_index, 3594 sos_hdr_v2_0); 3595 if (err) 3596 goto out; 3597 } 3598 break; 3599 default: 3600 dev_err(adev->dev, 3601 "unsupported psp sos firmware\n"); 3602 err = -EINVAL; 3603 goto out; 3604 } 3605 3606 return 0; 3607 out: 3608 amdgpu_ucode_release(&adev->psp.sos_fw); 3609 3610 return err; 3611 } 3612 3613 static bool is_ta_fw_applicable(struct psp_context *psp, 3614 const struct psp_fw_bin_desc *desc) 3615 { 3616 struct amdgpu_device *adev = psp->adev; 3617 uint32_t fw_version; 3618 3619 switch (desc->fw_type) { 3620 case TA_FW_TYPE_PSP_XGMI: 3621 case TA_FW_TYPE_PSP_XGMI_AUX: 3622 /* for now, AUX TA only exists on 13.0.6 ta bin, 3623 * from v20.00.0x.14 3624 */ 3625 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == 3626 IP_VERSION(13, 0, 6)) { 3627 fw_version = le32_to_cpu(desc->fw_version); 3628 3629 if (adev->flags & AMD_IS_APU && 3630 (fw_version & 0xff) >= 0x14) 3631 return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX; 3632 else 3633 return desc->fw_type == TA_FW_TYPE_PSP_XGMI; 3634 } 3635 break; 3636 default: 3637 break; 3638 } 3639 3640 return true; 3641 } 3642 3643 static int parse_ta_bin_descriptor(struct psp_context *psp, 3644 const struct psp_fw_bin_desc *desc, 3645 const struct ta_firmware_header_v2_0 *ta_hdr) 3646 { 3647 uint8_t *ucode_start_addr = NULL; 3648 3649 if (!psp || !desc || !ta_hdr) 3650 return -EINVAL; 3651 3652 if (!is_ta_fw_applicable(psp, desc)) 3653 return 0; 3654 3655 ucode_start_addr = (uint8_t *)ta_hdr + 3656 le32_to_cpu(desc->offset_bytes) + 3657 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3658 3659 switch (desc->fw_type) { 3660 case TA_FW_TYPE_PSP_ASD: 3661 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3662 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version); 3663 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3664 psp->asd_context.bin_desc.start_addr = ucode_start_addr; 3665 break; 3666 case TA_FW_TYPE_PSP_XGMI: 3667 case TA_FW_TYPE_PSP_XGMI_AUX: 3668 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3669 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3670 psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr; 3671 break; 3672 case TA_FW_TYPE_PSP_RAS: 3673 psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3674 psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3675 psp->ras_context.context.bin_desc.start_addr = ucode_start_addr; 3676 break; 3677 case TA_FW_TYPE_PSP_HDCP: 3678 psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3679 psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3680 psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr; 3681 break; 3682 case TA_FW_TYPE_PSP_DTM: 3683 psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3684 psp->dtm_context.context.bin_desc.size_bytes = 
le32_to_cpu(desc->size_bytes); 3685 psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr; 3686 break; 3687 case TA_FW_TYPE_PSP_RAP: 3688 psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3689 psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3690 psp->rap_context.context.bin_desc.start_addr = ucode_start_addr; 3691 break; 3692 case TA_FW_TYPE_PSP_SECUREDISPLAY: 3693 psp->securedisplay_context.context.bin_desc.fw_version = 3694 le32_to_cpu(desc->fw_version); 3695 psp->securedisplay_context.context.bin_desc.size_bytes = 3696 le32_to_cpu(desc->size_bytes); 3697 psp->securedisplay_context.context.bin_desc.start_addr = 3698 ucode_start_addr; 3699 break; 3700 default: 3701 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type); 3702 break; 3703 } 3704 3705 return 0; 3706 } 3707 3708 static int parse_ta_v1_microcode(struct psp_context *psp) 3709 { 3710 const struct ta_firmware_header_v1_0 *ta_hdr; 3711 struct amdgpu_device *adev = psp->adev; 3712 3713 ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data; 3714 3715 if (le16_to_cpu(ta_hdr->header.header_version_major) != 1) 3716 return -EINVAL; 3717 3718 adev->psp.xgmi_context.context.bin_desc.fw_version = 3719 le32_to_cpu(ta_hdr->xgmi.fw_version); 3720 adev->psp.xgmi_context.context.bin_desc.size_bytes = 3721 le32_to_cpu(ta_hdr->xgmi.size_bytes); 3722 adev->psp.xgmi_context.context.bin_desc.start_addr = 3723 (uint8_t *)ta_hdr + 3724 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3725 3726 adev->psp.ras_context.context.bin_desc.fw_version = 3727 le32_to_cpu(ta_hdr->ras.fw_version); 3728 adev->psp.ras_context.context.bin_desc.size_bytes = 3729 le32_to_cpu(ta_hdr->ras.size_bytes); 3730 adev->psp.ras_context.context.bin_desc.start_addr = 3731 (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr + 3732 le32_to_cpu(ta_hdr->ras.offset_bytes); 3733 3734 adev->psp.hdcp_context.context.bin_desc.fw_version = 3735 le32_to_cpu(ta_hdr->hdcp.fw_version); 3736 adev->psp.hdcp_context.context.bin_desc.size_bytes = 3737 le32_to_cpu(ta_hdr->hdcp.size_bytes); 3738 adev->psp.hdcp_context.context.bin_desc.start_addr = 3739 (uint8_t *)ta_hdr + 3740 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3741 3742 adev->psp.dtm_context.context.bin_desc.fw_version = 3743 le32_to_cpu(ta_hdr->dtm.fw_version); 3744 adev->psp.dtm_context.context.bin_desc.size_bytes = 3745 le32_to_cpu(ta_hdr->dtm.size_bytes); 3746 adev->psp.dtm_context.context.bin_desc.start_addr = 3747 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3748 le32_to_cpu(ta_hdr->dtm.offset_bytes); 3749 3750 adev->psp.securedisplay_context.context.bin_desc.fw_version = 3751 le32_to_cpu(ta_hdr->securedisplay.fw_version); 3752 adev->psp.securedisplay_context.context.bin_desc.size_bytes = 3753 le32_to_cpu(ta_hdr->securedisplay.size_bytes); 3754 adev->psp.securedisplay_context.context.bin_desc.start_addr = 3755 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3756 le32_to_cpu(ta_hdr->securedisplay.offset_bytes); 3757 3758 adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); 3759 3760 return 0; 3761 } 3762 3763 static int parse_ta_v2_microcode(struct psp_context *psp) 3764 { 3765 const struct ta_firmware_header_v2_0 *ta_hdr; 3766 struct amdgpu_device *adev = psp->adev; 3767 int err = 0; 3768 int ta_index = 0; 3769 3770 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data; 3771 3772 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) 3773 
return -EINVAL; 3774 3775 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) { 3776 dev_err(adev->dev, "packed TA count exceeds maximum limit\n"); 3777 return -EINVAL; 3778 } 3779 3780 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) { 3781 err = parse_ta_bin_descriptor(psp, 3782 &ta_hdr->ta_fw_bin[ta_index], 3783 ta_hdr); 3784 if (err) 3785 return err; 3786 } 3787 3788 return 0; 3789 } 3790 3791 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name) 3792 { 3793 const struct common_firmware_header *hdr; 3794 struct amdgpu_device *adev = psp->adev; 3795 int err; 3796 3797 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED, 3798 "amdgpu/%s_ta.bin", chip_name); 3799 if (err) 3800 return err; 3801 3802 hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data; 3803 switch (le16_to_cpu(hdr->header_version_major)) { 3804 case 1: 3805 err = parse_ta_v1_microcode(psp); 3806 break; 3807 case 2: 3808 err = parse_ta_v2_microcode(psp); 3809 break; 3810 default: 3811 dev_err(adev->dev, "unsupported TA header version\n"); 3812 err = -EINVAL; 3813 } 3814 3815 if (err) 3816 amdgpu_ucode_release(&adev->psp.ta_fw); 3817 3818 return err; 3819 } 3820 3821 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name) 3822 { 3823 struct amdgpu_device *adev = psp->adev; 3824 const struct psp_firmware_header_v1_0 *cap_hdr_v1_0; 3825 struct amdgpu_firmware_info *info = NULL; 3826 int err = 0; 3827 3828 if (!amdgpu_sriov_vf(adev)) { 3829 dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n"); 3830 return -EINVAL; 3831 } 3832 3833 err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL, 3834 "amdgpu/%s_cap.bin", chip_name); 3835 if (err) { 3836 if (err == -ENODEV) { 3837 dev_warn(adev->dev, "cap microcode does not exist, skip\n"); 3838 err = 0; 3839 } else { 3840 dev_err(adev->dev, "fail to initialize cap microcode\n"); 3841 } 3842 goto out; 3843 } 3844 3845 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP]; 3846 info->ucode_id = AMDGPU_UCODE_ID_CAP; 3847 info->fw = adev->psp.cap_fw; 3848 cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *) 3849 adev->psp.cap_fw->data; 3850 adev->firmware.fw_size += ALIGN( 3851 le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE); 3852 adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version); 3853 adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version); 3854 adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes); 3855 3856 return 0; 3857 3858 out: 3859 amdgpu_ucode_release(&adev->psp.cap_fw); 3860 return err; 3861 } 3862 3863 int psp_config_sq_perfmon(struct psp_context *psp, 3864 uint32_t xcp_id, bool core_override_enable, 3865 bool reg_override_enable, bool perfmon_override_enable) 3866 { 3867 int ret; 3868 3869 if (amdgpu_sriov_vf(psp->adev)) 3870 return 0; 3871 3872 if (xcp_id > MAX_XCP) { 3873 dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id); 3874 return -EINVAL; 3875 } 3876 3877 if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) { 3878 dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n", 3879 amdgpu_ip_version(psp->adev, MP0_HWIP, 0)); 3880 return -EINVAL; 3881 } 3882 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 3883 3884 cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON; 3885 cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id); 3886 cmd->cmd.config_sq_perfmon.core_override = 
core_override_enable; 3887 cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable; 3888 cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable; 3889 3890 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 3891 if (ret) 3892 dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n", 3893 xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable); 3894 3895 release_psp_cmd_buf(psp); 3896 return ret; 3897 } 3898 3899 static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block, 3900 enum amd_clockgating_state state) 3901 { 3902 return 0; 3903 } 3904 3905 static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block, 3906 enum amd_powergating_state state) 3907 { 3908 return 0; 3909 } 3910 3911 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, 3912 struct device_attribute *attr, 3913 char *buf) 3914 { 3915 struct drm_device *ddev = dev_get_drvdata(dev); 3916 struct amdgpu_device *adev = drm_to_adev(ddev); 3917 struct amdgpu_ip_block *ip_block; 3918 uint32_t fw_ver; 3919 int ret; 3920 3921 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP); 3922 if (!ip_block || !ip_block->status.late_initialized) { 3923 dev_info(adev->dev, "PSP block is not ready yet.\n"); 3924 return -EBUSY; 3925 } 3926 3927 mutex_lock(&adev->psp.mutex); 3928 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver); 3929 mutex_unlock(&adev->psp.mutex); 3930 3931 if (ret) { 3932 dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret); 3933 return ret; 3934 } 3935 3936 return sysfs_emit(buf, "%x\n", fw_ver); 3937 } 3938 3939 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, 3940 struct device_attribute *attr, 3941 const char *buf, 3942 size_t count) 3943 { 3944 struct drm_device *ddev = dev_get_drvdata(dev); 3945 struct amdgpu_device *adev = drm_to_adev(ddev); 3946 int ret, idx; 3947 const struct firmware *usbc_pd_fw; 3948 struct amdgpu_bo *fw_buf_bo = NULL; 3949 uint64_t fw_pri_mc_addr; 3950 void *fw_pri_cpu_addr; 3951 struct amdgpu_ip_block *ip_block; 3952 3953 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP); 3954 if (!ip_block || !ip_block->status.late_initialized) { 3955 dev_err(adev->dev, "PSP block is not ready yet.\n"); 3956 return -EBUSY; 3957 } 3958 3959 if (!drm_dev_enter(ddev, &idx)) 3960 return -ENODEV; 3961 3962 ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED, 3963 "amdgpu/%s", buf); 3964 if (ret) 3965 goto fail; 3966 3967 /* LFB address which is aligned to 1MB boundary per PSP request */ 3968 ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000, 3969 AMDGPU_GEM_DOMAIN_VRAM | 3970 AMDGPU_GEM_DOMAIN_GTT, 3971 &fw_buf_bo, &fw_pri_mc_addr, 3972 &fw_pri_cpu_addr); 3973 if (ret) 3974 goto rel_buf; 3975 3976 memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size); 3977 3978 mutex_lock(&adev->psp.mutex); 3979 ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr); 3980 mutex_unlock(&adev->psp.mutex); 3981 3982 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr); 3983 3984 rel_buf: 3985 amdgpu_ucode_release(&usbc_pd_fw); 3986 fail: 3987 if (ret) { 3988 dev_err(adev->dev, "Failed to load USBC PD FW, err = %d\n", ret); 3989 count = ret; 3990 } 3991 3992 drm_dev_exit(idx); 3993 return count; 3994 } 3995 3996 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size) 3997 { 3998 int idx; 3999 4000 if (!drm_dev_enter(adev_to_drm(psp->adev), &idx)) 4001 return; 4002 4003 memset(psp->fw_pri_buf, 0,
PSP_1_MEG); 4004 memcpy(psp->fw_pri_buf, start_addr, bin_size); 4005 4006 drm_dev_exit(idx); 4007 } 4008 4009 /** 4010 * DOC: usbc_pd_fw 4011 * Reading from this file will retrieve the USB-C PD firmware version. Writing to 4012 * this file will trigger the update process. 4013 */ 4014 static DEVICE_ATTR(usbc_pd_fw, 0644, 4015 psp_usbc_pd_fw_sysfs_read, 4016 psp_usbc_pd_fw_sysfs_write); 4017 4018 int is_psp_fw_valid(struct psp_bin_desc bin) 4019 { 4020 return bin.size_bytes; 4021 } 4022 4023 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj, 4024 struct bin_attribute *bin_attr, 4025 char *buffer, loff_t pos, size_t count) 4026 { 4027 struct device *dev = kobj_to_dev(kobj); 4028 struct drm_device *ddev = dev_get_drvdata(dev); 4029 struct amdgpu_device *adev = drm_to_adev(ddev); 4030 4031 adev->psp.vbflash_done = false; 4032 4033 /* Safeguard against memory drain */ 4034 if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) { 4035 dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B); 4036 kvfree(adev->psp.vbflash_tmp_buf); 4037 adev->psp.vbflash_tmp_buf = NULL; 4038 adev->psp.vbflash_image_size = 0; 4039 return -ENOMEM; 4040 } 4041 4042 /* TODO Just allocate max for now and optimize to realloc later if needed */ 4043 if (!adev->psp.vbflash_tmp_buf) { 4044 adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL); 4045 if (!adev->psp.vbflash_tmp_buf) 4046 return -ENOMEM; 4047 } 4048 4049 mutex_lock(&adev->psp.mutex); 4050 memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count); 4051 adev->psp.vbflash_image_size += count; 4052 mutex_unlock(&adev->psp.mutex); 4053 4054 dev_dbg(adev->dev, "IFWI staged for update\n"); 4055 4056 return count; 4057 } 4058 4059 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj, 4060 struct bin_attribute *bin_attr, char *buffer, 4061 loff_t pos, size_t count) 4062 { 4063 struct device *dev = kobj_to_dev(kobj); 4064 struct drm_device *ddev = dev_get_drvdata(dev); 4065 struct amdgpu_device *adev = drm_to_adev(ddev); 4066 struct amdgpu_bo *fw_buf_bo = NULL; 4067 uint64_t fw_pri_mc_addr; 4068 void *fw_pri_cpu_addr; 4069 int ret; 4070 4071 if (adev->psp.vbflash_image_size == 0) 4072 return -EINVAL; 4073 4074 dev_dbg(adev->dev, "PSP IFWI flash process initiated\n"); 4075 4076 ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size, 4077 AMDGPU_GPU_PAGE_SIZE, 4078 AMDGPU_GEM_DOMAIN_VRAM, 4079 &fw_buf_bo, 4080 &fw_pri_mc_addr, 4081 &fw_pri_cpu_addr); 4082 if (ret) 4083 goto rel_buf; 4084 4085 memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size); 4086 4087 mutex_lock(&adev->psp.mutex); 4088 ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr); 4089 mutex_unlock(&adev->psp.mutex); 4090 4091 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr); 4092 4093 rel_buf: 4094 kvfree(adev->psp.vbflash_tmp_buf); 4095 adev->psp.vbflash_tmp_buf = NULL; 4096 adev->psp.vbflash_image_size = 0; 4097 4098 if (ret) { 4099 dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret); 4100 return ret; 4101 } 4102 4103 dev_dbg(adev->dev, "PSP IFWI flash process done\n"); 4104 return 0; 4105 } 4106 4107 /** 4108 * DOC: psp_vbflash 4109 * Writing to this file will stage an IFWI for update. Reading from this file 4110 * will trigger the update process. 
4111 */ 4112 static struct bin_attribute psp_vbflash_bin_attr = { 4113 .attr = {.name = "psp_vbflash", .mode = 0660}, 4114 .size = 0, 4115 .write = amdgpu_psp_vbflash_write, 4116 .read = amdgpu_psp_vbflash_read, 4117 }; 4118 4119 /** 4120 * DOC: psp_vbflash_status 4121 * The status of the flash process. 4122 * 0: IFWI flash not complete. 4123 * 1: IFWI flash complete. 4124 */ 4125 static ssize_t amdgpu_psp_vbflash_status(struct device *dev, 4126 struct device_attribute *attr, 4127 char *buf) 4128 { 4129 struct drm_device *ddev = dev_get_drvdata(dev); 4130 struct amdgpu_device *adev = drm_to_adev(ddev); 4131 uint32_t vbflash_status; 4132 4133 vbflash_status = psp_vbflash_status(&adev->psp); 4134 if (!adev->psp.vbflash_done) 4135 vbflash_status = 0; 4136 else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000)) 4137 vbflash_status = 1; 4138 4139 return sysfs_emit(buf, "0x%x\n", vbflash_status); 4140 } 4141 static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL); 4142 4143 static struct bin_attribute *bin_flash_attrs[] = { 4144 &psp_vbflash_bin_attr, 4145 NULL 4146 }; 4147 4148 static struct attribute *flash_attrs[] = { 4149 &dev_attr_psp_vbflash_status.attr, 4150 &dev_attr_usbc_pd_fw.attr, 4151 NULL 4152 }; 4153 4154 static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx) 4155 { 4156 struct device *dev = kobj_to_dev(kobj); 4157 struct drm_device *ddev = dev_get_drvdata(dev); 4158 struct amdgpu_device *adev = drm_to_adev(ddev); 4159 4160 if (attr == &dev_attr_usbc_pd_fw.attr) 4161 return adev->psp.sup_pd_fw_up ? 0660 : 0; 4162 4163 return adev->psp.sup_ifwi_up ? 0440 : 0; 4164 } 4165 4166 static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj, 4167 const struct bin_attribute *attr, 4168 int idx) 4169 { 4170 struct device *dev = kobj_to_dev(kobj); 4171 struct drm_device *ddev = dev_get_drvdata(dev); 4172 struct amdgpu_device *adev = drm_to_adev(ddev); 4173 4174 return adev->psp.sup_ifwi_up ? 
0660 : 0; 4175 } 4176 4177 const struct attribute_group amdgpu_flash_attr_group = { 4178 .attrs = flash_attrs, 4179 .bin_attrs = bin_flash_attrs, 4180 .is_bin_visible = amdgpu_bin_flash_attr_is_visible, 4181 .is_visible = amdgpu_flash_attr_is_visible, 4182 }; 4183 4184 const struct amd_ip_funcs psp_ip_funcs = { 4185 .name = "psp", 4186 .early_init = psp_early_init, 4187 .sw_init = psp_sw_init, 4188 .sw_fini = psp_sw_fini, 4189 .hw_init = psp_hw_init, 4190 .hw_fini = psp_hw_fini, 4191 .suspend = psp_suspend, 4192 .resume = psp_resume, 4193 .set_clockgating_state = psp_set_clockgating_state, 4194 .set_powergating_state = psp_set_powergating_state, 4195 }; 4196 4197 const struct amdgpu_ip_block_version psp_v3_1_ip_block = { 4198 .type = AMD_IP_BLOCK_TYPE_PSP, 4199 .major = 3, 4200 .minor = 1, 4201 .rev = 0, 4202 .funcs = &psp_ip_funcs, 4203 }; 4204 4205 const struct amdgpu_ip_block_version psp_v10_0_ip_block = { 4206 .type = AMD_IP_BLOCK_TYPE_PSP, 4207 .major = 10, 4208 .minor = 0, 4209 .rev = 0, 4210 .funcs = &psp_ip_funcs, 4211 }; 4212 4213 const struct amdgpu_ip_block_version psp_v11_0_ip_block = { 4214 .type = AMD_IP_BLOCK_TYPE_PSP, 4215 .major = 11, 4216 .minor = 0, 4217 .rev = 0, 4218 .funcs = &psp_ip_funcs, 4219 }; 4220 4221 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = { 4222 .type = AMD_IP_BLOCK_TYPE_PSP, 4223 .major = 11, 4224 .minor = 0, 4225 .rev = 8, 4226 .funcs = &psp_ip_funcs, 4227 }; 4228 4229 const struct amdgpu_ip_block_version psp_v12_0_ip_block = { 4230 .type = AMD_IP_BLOCK_TYPE_PSP, 4231 .major = 12, 4232 .minor = 0, 4233 .rev = 0, 4234 .funcs = &psp_ip_funcs, 4235 }; 4236 4237 const struct amdgpu_ip_block_version psp_v13_0_ip_block = { 4238 .type = AMD_IP_BLOCK_TYPE_PSP, 4239 .major = 13, 4240 .minor = 0, 4241 .rev = 0, 4242 .funcs = &psp_ip_funcs, 4243 }; 4244 4245 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = { 4246 .type = AMD_IP_BLOCK_TYPE_PSP, 4247 .major = 13, 4248 .minor = 0, 4249 .rev = 4, 4250 .funcs = &psp_ip_funcs, 4251 }; 4252 4253 const struct amdgpu_ip_block_version psp_v14_0_ip_block = { 4254 .type = AMD_IP_BLOCK_TYPE_PSP, 4255 .major = 14, 4256 .minor = 0, 4257 .rev = 0, 4258 .funcs = &psp_ip_funcs, 4259 }; 4260
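
/*
 * Illustrative userspace sketch (not part of the driver) of the update flow
 * described in the DOC: psp_vbflash and DOC: psp_vbflash_status sections
 * above: stage an IFWI image by writing it to psp_vbflash, trigger the flash
 * by reading the same file back, then check psp_vbflash_status until it
 * reports 1.  The usbc_pd_fw file follows a similar pattern: reading returns
 * the current USB-C PD firmware version in hex, and writing the name of a
 * firmware file under the kernel firmware search path (requested as
 * "amdgpu/<name>") starts the PD update.  The sysfs directory and the image
 * path below are hypothetical examples; the real location depends on the
 * card index, and the files are only visible when the ASIC advertises the
 * corresponding update support (sup_ifwi_up / sup_pd_fw_up).
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	#define SYSFS_DIR "/sys/class/drm/card0/device"	// hypothetical card
 *
 *	static int flash_ifwi(const char *image_path)
 *	{
 *		char path[256], buf[4096], status[16];
 *		int img, vb, st;
 *		ssize_t n;
 *
 *		snprintf(path, sizeof(path), "%s/psp_vbflash", SYSFS_DIR);
 *		img = open(image_path, O_RDONLY);
 *		vb = open(path, O_WRONLY);
 *		if (img < 0 || vb < 0)
 *			return -1;
 *
 *		// Stage the image: each chunk is buffered by the driver.
 *		while ((n = read(img, buf, sizeof(buf))) > 0)
 *			if (write(vb, buf, n) != n)
 *				return -1;
 *		close(vb);
 *		close(img);
 *
 *		// Reading psp_vbflash back triggers the actual SPIROM update.
 *		vb = open(path, O_RDONLY);
 *		if (vb < 0 || read(vb, buf, 1) < 0)
 *			return -1;
 *		close(vb);
 *
 *		// Check psp_vbflash_status; "0x1" means the flash completed.
 *		snprintf(path, sizeof(path), "%s/psp_vbflash_status", SYSFS_DIR);
 *		st = open(path, O_RDONLY);
 *		if (st < 0)
 *			return -1;
 *		n = read(st, status, sizeof(status) - 1);
 *		close(st);
 *		if (n <= 0)
 *			return -1;
 *		status[n] = '\0';
 *		return strcmp(status, "0x1\n") ? -1 : 0;
 *	}
 */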