1 /* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Author: Huang Rui 23 * 24 */ 25 26 #include <linux/firmware.h> 27 #include <drm/drm_drv.h> 28 29 #include "amdgpu.h" 30 #include "amdgpu_psp.h" 31 #include "amdgpu_ucode.h" 32 #include "amdgpu_xgmi.h" 33 #include "soc15_common.h" 34 #include "psp_v3_1.h" 35 #include "psp_v10_0.h" 36 #include "psp_v11_0.h" 37 #include "psp_v11_0_8.h" 38 #include "psp_v12_0.h" 39 #include "psp_v13_0.h" 40 #include "psp_v13_0_4.h" 41 #include "psp_v14_0.h" 42 43 #include "amdgpu_ras.h" 44 #include "amdgpu_securedisplay.h" 45 #include "amdgpu_atomfirmware.h" 46 47 #define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*16) 48 49 static int psp_load_smu_fw(struct psp_context *psp); 50 static int psp_rap_terminate(struct psp_context *psp); 51 static int psp_securedisplay_terminate(struct psp_context *psp); 52 53 static int psp_ring_init(struct psp_context *psp, 54 enum psp_ring_type ring_type) 55 { 56 int ret = 0; 57 struct psp_ring *ring; 58 struct amdgpu_device *adev = psp->adev; 59 60 ring = &psp->km_ring; 61 62 ring->ring_type = ring_type; 63 64 /* allocate 4k Page of Local Frame Buffer memory for ring */ 65 ring->ring_size = 0x1000; 66 ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE, 67 AMDGPU_GEM_DOMAIN_VRAM | 68 AMDGPU_GEM_DOMAIN_GTT, 69 &adev->firmware.rbuf, 70 &ring->ring_mem_mc_addr, 71 (void **)&ring->ring_mem); 72 if (ret) { 73 ring->ring_size = 0; 74 return ret; 75 } 76 77 return 0; 78 } 79 80 /* 81 * Due to DF Cstate management centralized to PMFW, the firmware 82 * loading sequence will be updated as below: 83 * - Load KDB 84 * - Load SYS_DRV 85 * - Load tOS 86 * - Load PMFW 87 * - Setup TMR 88 * - Load other non-psp fw 89 * - Load ASD 90 * - Load XGMI/RAS/HDCP/DTM TA if any 91 * 92 * This new sequence is required for 93 * - Arcturus and onwards 94 */ 95 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp) 96 { 97 struct amdgpu_device *adev = psp->adev; 98 99 if (amdgpu_sriov_vf(adev)) { 100 psp->pmfw_centralized_cstate_management = false; 101 return; 102 } 103 104 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 105 case IP_VERSION(11, 0, 0): 106 case IP_VERSION(11, 0, 4): 107 case IP_VERSION(11, 0, 5): 108 case IP_VERSION(11, 0, 7): 109 case IP_VERSION(11, 0, 9): 110 case IP_VERSION(11, 0, 11): 111 case IP_VERSION(11, 0, 12): 112 case IP_VERSION(11, 0, 13): 113 case IP_VERSION(13, 0, 0): 114 case IP_VERSION(13, 0, 
2): 115 case IP_VERSION(13, 0, 7): 116 psp->pmfw_centralized_cstate_management = true; 117 break; 118 default: 119 psp->pmfw_centralized_cstate_management = false; 120 break; 121 } 122 } 123 124 static int psp_init_sriov_microcode(struct psp_context *psp) 125 { 126 struct amdgpu_device *adev = psp->adev; 127 char ucode_prefix[30]; 128 int ret = 0; 129 130 amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix)); 131 132 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 133 case IP_VERSION(9, 0, 0): 134 case IP_VERSION(11, 0, 7): 135 case IP_VERSION(11, 0, 9): 136 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2; 137 ret = psp_init_cap_microcode(psp, ucode_prefix); 138 break; 139 case IP_VERSION(13, 0, 2): 140 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2; 141 ret = psp_init_cap_microcode(psp, ucode_prefix); 142 ret &= psp_init_ta_microcode(psp, ucode_prefix); 143 break; 144 case IP_VERSION(13, 0, 0): 145 adev->virt.autoload_ucode_id = 0; 146 break; 147 case IP_VERSION(13, 0, 6): 148 case IP_VERSION(13, 0, 14): 149 ret = psp_init_cap_microcode(psp, ucode_prefix); 150 ret &= psp_init_ta_microcode(psp, ucode_prefix); 151 break; 152 case IP_VERSION(13, 0, 10): 153 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA; 154 ret = psp_init_cap_microcode(psp, ucode_prefix); 155 break; 156 default: 157 return -EINVAL; 158 } 159 return ret; 160 } 161 162 static int psp_early_init(struct amdgpu_ip_block *ip_block) 163 { 164 struct amdgpu_device *adev = ip_block->adev; 165 struct psp_context *psp = &adev->psp; 166 167 psp->autoload_supported = true; 168 psp->boot_time_tmr = true; 169 170 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 171 case IP_VERSION(9, 0, 0): 172 psp_v3_1_set_psp_funcs(psp); 173 psp->autoload_supported = false; 174 psp->boot_time_tmr = false; 175 break; 176 case IP_VERSION(10, 0, 0): 177 case IP_VERSION(10, 0, 1): 178 psp_v10_0_set_psp_funcs(psp); 179 psp->autoload_supported = false; 180 psp->boot_time_tmr = false; 181 break; 182 case IP_VERSION(11, 0, 2): 183 case IP_VERSION(11, 0, 4): 184 psp_v11_0_set_psp_funcs(psp); 185 psp->autoload_supported = false; 186 psp->boot_time_tmr = false; 187 break; 188 case IP_VERSION(11, 0, 0): 189 case IP_VERSION(11, 0, 7): 190 adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev); 191 fallthrough; 192 case IP_VERSION(11, 0, 5): 193 case IP_VERSION(11, 0, 9): 194 case IP_VERSION(11, 0, 11): 195 case IP_VERSION(11, 5, 0): 196 case IP_VERSION(11, 0, 12): 197 case IP_VERSION(11, 0, 13): 198 psp_v11_0_set_psp_funcs(psp); 199 psp->boot_time_tmr = false; 200 break; 201 case IP_VERSION(11, 0, 3): 202 case IP_VERSION(12, 0, 1): 203 psp_v12_0_set_psp_funcs(psp); 204 psp->autoload_supported = false; 205 psp->boot_time_tmr = false; 206 break; 207 case IP_VERSION(13, 0, 2): 208 psp->boot_time_tmr = false; 209 fallthrough; 210 case IP_VERSION(13, 0, 6): 211 case IP_VERSION(13, 0, 14): 212 psp_v13_0_set_psp_funcs(psp); 213 psp->autoload_supported = false; 214 break; 215 case IP_VERSION(13, 0, 12): 216 psp_v13_0_set_psp_funcs(psp); 217 psp->autoload_supported = false; 218 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev); 219 break; 220 case IP_VERSION(13, 0, 1): 221 case IP_VERSION(13, 0, 3): 222 case IP_VERSION(13, 0, 5): 223 case IP_VERSION(13, 0, 8): 224 case IP_VERSION(13, 0, 11): 225 case IP_VERSION(14, 0, 0): 226 case IP_VERSION(14, 0, 1): 227 case IP_VERSION(14, 0, 4): 228 psp_v13_0_set_psp_funcs(psp); 229 psp->boot_time_tmr = false; 230 break; 231 case IP_VERSION(11, 0, 8): 232 if (adev->apu_flags & 
AMD_APU_IS_CYAN_SKILLFISH2) { 233 psp_v11_0_8_set_psp_funcs(psp); 234 } 235 psp->autoload_supported = false; 236 psp->boot_time_tmr = false; 237 break; 238 case IP_VERSION(13, 0, 0): 239 case IP_VERSION(13, 0, 7): 240 case IP_VERSION(13, 0, 10): 241 psp_v13_0_set_psp_funcs(psp); 242 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev); 243 psp->boot_time_tmr = false; 244 break; 245 case IP_VERSION(13, 0, 4): 246 psp_v13_0_4_set_psp_funcs(psp); 247 psp->boot_time_tmr = false; 248 break; 249 case IP_VERSION(14, 0, 2): 250 case IP_VERSION(14, 0, 3): 251 psp_v14_0_set_psp_funcs(psp); 252 break; 253 case IP_VERSION(14, 0, 5): 254 psp_v14_0_set_psp_funcs(psp); 255 psp->boot_time_tmr = false; 256 break; 257 default: 258 return -EINVAL; 259 } 260 261 psp->adev = adev; 262 263 adev->psp_timeout = 20000; 264 265 psp_check_pmfw_centralized_cstate_management(psp); 266 267 if (amdgpu_sriov_vf(adev)) 268 return psp_init_sriov_microcode(psp); 269 else 270 return psp_init_microcode(psp); 271 } 272 273 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx) 274 { 275 amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr, 276 &mem_ctx->shared_buf); 277 mem_ctx->shared_bo = NULL; 278 } 279 280 static void psp_free_shared_bufs(struct psp_context *psp) 281 { 282 void *tmr_buf; 283 void **pptr; 284 285 /* free TMR memory buffer */ 286 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; 287 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr); 288 psp->tmr_bo = NULL; 289 290 /* free xgmi shared memory */ 291 psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context); 292 293 /* free ras shared memory */ 294 psp_ta_free_shared_buf(&psp->ras_context.context.mem_context); 295 296 /* free hdcp shared memory */ 297 psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context); 298 299 /* free dtm shared memory */ 300 psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context); 301 302 /* free rap shared memory */ 303 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); 304 305 /* free securedisplay shared memory */ 306 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); 307 308 309 } 310 311 static void psp_memory_training_fini(struct psp_context *psp) 312 { 313 struct psp_memory_training_context *ctx = &psp->mem_train_ctx; 314 315 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT; 316 kfree(ctx->sys_cache); 317 ctx->sys_cache = NULL; 318 } 319 320 static int psp_memory_training_init(struct psp_context *psp) 321 { 322 int ret; 323 struct psp_memory_training_context *ctx = &psp->mem_train_ctx; 324 325 if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) { 326 dev_dbg(psp->adev->dev, "memory training is not supported!\n"); 327 return 0; 328 } 329 330 ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL); 331 if (ctx->sys_cache == NULL) { 332 dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n"); 333 ret = -ENOMEM; 334 goto Err_out; 335 } 336 337 dev_dbg(psp->adev->dev, 338 "train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", 339 ctx->train_data_size, 340 ctx->p2c_train_data_offset, 341 ctx->c2p_train_data_offset); 342 ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS; 343 return 0; 344 345 Err_out: 346 psp_memory_training_fini(psp); 347 return ret; 348 } 349 350 /* 351 * Helper funciton to query psp runtime database entry 352 * 353 * @adev: amdgpu_device pointer 354 * @entry_type: the type of psp runtime database entry 355 * @db_entry: runtime database entry pointer 356 * 357 * Return false if runtime database doesn't exit or entry is invalid 
358 * or true if the specific database entry is found, and copy to @db_entry 359 */ 360 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev, 361 enum psp_runtime_entry_type entry_type, 362 void *db_entry) 363 { 364 uint64_t db_header_pos, db_dir_pos; 365 struct psp_runtime_data_header db_header = {0}; 366 struct psp_runtime_data_directory db_dir = {0}; 367 bool ret = false; 368 int i; 369 370 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || 371 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) || 372 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) 373 return false; 374 375 db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET; 376 db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header); 377 378 /* read runtime db header from vram */ 379 amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header, 380 sizeof(struct psp_runtime_data_header), false); 381 382 if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) { 383 /* runtime db doesn't exist, exit */ 384 dev_dbg(adev->dev, "PSP runtime database doesn't exist\n"); 385 return false; 386 } 387 388 /* read runtime database entry from vram */ 389 amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir, 390 sizeof(struct psp_runtime_data_directory), false); 391 392 if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) { 393 /* invalid db entry count, exit */ 394 dev_warn(adev->dev, "Invalid PSP runtime database entry count\n"); 395 return false; 396 } 397 398 /* look up for requested entry type */ 399 for (i = 0; i < db_dir.entry_count && !ret; i++) { 400 if (db_dir.entry_list[i].entry_type == entry_type) { 401 switch (entry_type) { 402 case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG: 403 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) { 404 /* invalid db entry size */ 405 dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n"); 406 return false; 407 } 408 /* read runtime database entry */ 409 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset, 410 (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false); 411 ret = true; 412 break; 413 case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS: 414 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) { 415 /* invalid db entry size */ 416 dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n"); 417 return false; 418 } 419 /* read runtime database entry */ 420 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset, 421 (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false); 422 ret = true; 423 break; 424 default: 425 ret = false; 426 break; 427 } 428 } 429 } 430 431 return ret; 432 } 433 434 static int psp_sw_init(struct amdgpu_ip_block *ip_block) 435 { 436 struct amdgpu_device *adev = ip_block->adev; 437 struct psp_context *psp = &adev->psp; 438 int ret; 439 struct psp_runtime_boot_cfg_entry boot_cfg_entry; 440 struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx; 441 struct psp_runtime_scpm_entry scpm_entry; 442 443 psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 444 if (!psp->cmd) { 445 dev_err(adev->dev, "Failed to allocate memory to command buffer!\n"); 446 ret = -ENOMEM; 447 } 448 449 adev->psp.xgmi_context.supports_extended_data = 450 !adev->gmc.xgmi.connected_to_cpu && 451 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2); 452 453 memset(&scpm_entry, 0, sizeof(scpm_entry)); 454 if 
((psp_get_runtime_db_entry(adev, 455 PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS, 456 &scpm_entry)) && 457 (scpm_entry.scpm_status != SCPM_DISABLE)) { 458 adev->scpm_enabled = true; 459 adev->scpm_status = scpm_entry.scpm_status; 460 } else { 461 adev->scpm_enabled = false; 462 adev->scpm_status = SCPM_DISABLE; 463 } 464 465 /* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */ 466 467 memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry)); 468 if (psp_get_runtime_db_entry(adev, 469 PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG, 470 &boot_cfg_entry)) { 471 psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask; 472 if ((psp->boot_cfg_bitmask) & 473 BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) { 474 /* If psp runtime database exists, then 475 * only enable two stage memory training 476 * when TWO_STAGE_DRAM_TRAINING bit is set 477 * in runtime database 478 */ 479 mem_training_ctx->enable_mem_training = true; 480 } 481 482 } else { 483 /* If psp runtime database doesn't exist or is 484 * invalid, force enable two stage memory training 485 */ 486 mem_training_ctx->enable_mem_training = true; 487 } 488 489 if (mem_training_ctx->enable_mem_training) { 490 ret = psp_memory_training_init(psp); 491 if (ret) { 492 dev_err(adev->dev, "Failed to initialize memory training!\n"); 493 return ret; 494 } 495 496 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); 497 if (ret) { 498 dev_err(adev->dev, "Failed to process memory training!\n"); 499 return ret; 500 } 501 } 502 503 ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, 504 (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ? 505 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, 506 &psp->fw_pri_bo, 507 &psp->fw_pri_mc_addr, 508 &psp->fw_pri_buf); 509 if (ret) 510 return ret; 511 512 ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE, 513 AMDGPU_GEM_DOMAIN_VRAM | 514 AMDGPU_GEM_DOMAIN_GTT, 515 &psp->fence_buf_bo, 516 &psp->fence_buf_mc_addr, 517 &psp->fence_buf); 518 if (ret) 519 goto failed1; 520 521 ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE, 522 AMDGPU_GEM_DOMAIN_VRAM | 523 AMDGPU_GEM_DOMAIN_GTT, 524 &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, 525 (void **)&psp->cmd_buf_mem); 526 if (ret) 527 goto failed2; 528 529 return 0; 530 531 failed2: 532 amdgpu_bo_free_kernel(&psp->fence_buf_bo, 533 &psp->fence_buf_mc_addr, &psp->fence_buf); 534 failed1: 535 amdgpu_bo_free_kernel(&psp->fw_pri_bo, 536 &psp->fw_pri_mc_addr, &psp->fw_pri_buf); 537 return ret; 538 } 539 540 static int psp_sw_fini(struct amdgpu_ip_block *ip_block) 541 { 542 struct amdgpu_device *adev = ip_block->adev; 543 struct psp_context *psp = &adev->psp; 544 545 psp_memory_training_fini(psp); 546 547 amdgpu_ucode_release(&psp->sos_fw); 548 amdgpu_ucode_release(&psp->asd_fw); 549 amdgpu_ucode_release(&psp->ta_fw); 550 amdgpu_ucode_release(&psp->cap_fw); 551 amdgpu_ucode_release(&psp->toc_fw); 552 553 kfree(psp->cmd); 554 psp->cmd = NULL; 555 556 psp_free_shared_bufs(psp); 557 558 if (psp->km_ring.ring_mem) 559 amdgpu_bo_free_kernel(&adev->firmware.rbuf, 560 &psp->km_ring.ring_mem_mc_addr, 561 (void **)&psp->km_ring.ring_mem); 562 563 amdgpu_bo_free_kernel(&psp->fw_pri_bo, 564 &psp->fw_pri_mc_addr, &psp->fw_pri_buf); 565 amdgpu_bo_free_kernel(&psp->fence_buf_bo, 566 &psp->fence_buf_mc_addr, &psp->fence_buf); 567 amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, 568 (void **)&psp->cmd_buf_mem); 569 570 return 0; 571 } 572 573 int psp_wait_for(struct psp_context *psp, uint32_t reg_index, 574 uint32_t reg_val, 
uint32_t mask, bool check_changed) 575 { 576 uint32_t val; 577 int i; 578 struct amdgpu_device *adev = psp->adev; 579 580 if (psp->adev->no_hw_access) 581 return 0; 582 583 for (i = 0; i < adev->usec_timeout; i++) { 584 val = RREG32(reg_index); 585 if (check_changed) { 586 if (val != reg_val) 587 return 0; 588 } else { 589 if ((val & mask) == reg_val) 590 return 0; 591 } 592 udelay(1); 593 } 594 595 return -ETIME; 596 } 597 598 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index, 599 uint32_t reg_val, uint32_t mask, uint32_t msec_timeout) 600 { 601 uint32_t val; 602 int i; 603 struct amdgpu_device *adev = psp->adev; 604 605 if (psp->adev->no_hw_access) 606 return 0; 607 608 for (i = 0; i < msec_timeout; i++) { 609 val = RREG32(reg_index); 610 if ((val & mask) == reg_val) 611 return 0; 612 msleep(1); 613 } 614 615 return -ETIME; 616 } 617 618 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id) 619 { 620 switch (cmd_id) { 621 case GFX_CMD_ID_LOAD_TA: 622 return "LOAD_TA"; 623 case GFX_CMD_ID_UNLOAD_TA: 624 return "UNLOAD_TA"; 625 case GFX_CMD_ID_INVOKE_CMD: 626 return "INVOKE_CMD"; 627 case GFX_CMD_ID_LOAD_ASD: 628 return "LOAD_ASD"; 629 case GFX_CMD_ID_SETUP_TMR: 630 return "SETUP_TMR"; 631 case GFX_CMD_ID_LOAD_IP_FW: 632 return "LOAD_IP_FW"; 633 case GFX_CMD_ID_DESTROY_TMR: 634 return "DESTROY_TMR"; 635 case GFX_CMD_ID_SAVE_RESTORE: 636 return "SAVE_RESTORE_IP_FW"; 637 case GFX_CMD_ID_SETUP_VMR: 638 return "SETUP_VMR"; 639 case GFX_CMD_ID_DESTROY_VMR: 640 return "DESTROY_VMR"; 641 case GFX_CMD_ID_PROG_REG: 642 return "PROG_REG"; 643 case GFX_CMD_ID_GET_FW_ATTESTATION: 644 return "GET_FW_ATTESTATION"; 645 case GFX_CMD_ID_LOAD_TOC: 646 return "ID_LOAD_TOC"; 647 case GFX_CMD_ID_AUTOLOAD_RLC: 648 return "AUTOLOAD_RLC"; 649 case GFX_CMD_ID_BOOT_CFG: 650 return "BOOT_CFG"; 651 case GFX_CMD_ID_CONFIG_SQ_PERFMON: 652 return "CONFIG_SQ_PERFMON"; 653 default: 654 return "UNKNOWN CMD"; 655 } 656 } 657 658 static bool psp_err_warn(struct psp_context *psp) 659 { 660 struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem; 661 662 /* This response indicates reg list is already loaded */ 663 if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) && 664 cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW && 665 cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST && 666 cmd->resp.status == TEE_ERROR_CANCEL) 667 return false; 668 669 return true; 670 } 671 672 static int 673 psp_cmd_submit_buf(struct psp_context *psp, 674 struct amdgpu_firmware_info *ucode, 675 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr) 676 { 677 int ret; 678 int index; 679 int timeout = psp->adev->psp_timeout; 680 bool ras_intr = false; 681 bool skip_unsupport = false; 682 683 if (psp->adev->no_hw_access) 684 return 0; 685 686 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); 687 688 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); 689 690 index = atomic_inc_return(&psp->fence_value); 691 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index); 692 if (ret) { 693 atomic_dec(&psp->fence_value); 694 goto exit; 695 } 696 697 amdgpu_device_invalidate_hdp(psp->adev, NULL); 698 while (*((unsigned int *)psp->fence_buf) != index) { 699 if (--timeout == 0) 700 break; 701 /* 702 * Shouldn't wait for timeout when err_event_athub occurs, 703 * because gpu reset thread triggered and lock resource should 704 * be released for psp resume sequence. 
705 */ 706 ras_intr = amdgpu_ras_intr_triggered(); 707 if (ras_intr) 708 break; 709 usleep_range(10, 100); 710 amdgpu_device_invalidate_hdp(psp->adev, NULL); 711 } 712 713 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */ 714 skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED || 715 psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev); 716 717 memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp)); 718 719 /* In some cases, psp response status is not 0 even there is no 720 * problem while the command is submitted. Some version of PSP FW 721 * doesn't write 0 to that field. 722 * So here we would like to only print a warning instead of an error 723 * during psp initialization to avoid breaking hw_init and it doesn't 724 * return -EINVAL. 725 */ 726 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) { 727 if (ucode) 728 dev_warn(psp->adev->dev, 729 "failed to load ucode %s(0x%X) ", 730 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id); 731 if (psp_err_warn(psp)) 732 dev_warn( 733 psp->adev->dev, 734 "psp gfx command %s(0x%X) failed and response status is (0x%X)\n", 735 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), 736 psp->cmd_buf_mem->cmd_id, 737 psp->cmd_buf_mem->resp.status); 738 /* If any firmware (including CAP) load fails under SRIOV, it should 739 * return failure to stop the VF from initializing. 740 * Also return failure in case of timeout 741 */ 742 if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) { 743 ret = -EINVAL; 744 goto exit; 745 } 746 } 747 748 if (ucode) { 749 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo; 750 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi; 751 } 752 753 exit: 754 return ret; 755 } 756 757 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp) 758 { 759 struct psp_gfx_cmd_resp *cmd = psp->cmd; 760 761 mutex_lock(&psp->mutex); 762 763 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); 764 765 return cmd; 766 } 767 768 static void release_psp_cmd_buf(struct psp_context *psp) 769 { 770 mutex_unlock(&psp->mutex); 771 } 772 773 static void psp_prep_tmr_cmd_buf(struct psp_context *psp, 774 struct psp_gfx_cmd_resp *cmd, 775 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo) 776 { 777 struct amdgpu_device *adev = psp->adev; 778 uint32_t size = 0; 779 uint64_t tmr_pa = 0; 780 781 if (tmr_bo) { 782 size = amdgpu_bo_size(tmr_bo); 783 tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo); 784 } 785 786 if (amdgpu_sriov_vf(psp->adev)) 787 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR; 788 else 789 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR; 790 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc); 791 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc); 792 cmd->cmd.cmd_setup_tmr.buf_size = size; 793 cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1; 794 cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa); 795 cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa); 796 } 797 798 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd, 799 uint64_t pri_buf_mc, uint32_t size) 800 { 801 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC; 802 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc); 803 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc); 804 cmd->cmd.cmd_load_toc.toc_size = size; 805 } 806 807 /* Issue LOAD TOC cmd to PSP to part toc and calculate tmr size needed */ 808 static int psp_load_toc(struct psp_context *psp, 809 uint32_t 
*tmr_size) 810 { 811 int ret; 812 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 813 814 /* Copy toc to psp firmware private buffer */ 815 psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes); 816 817 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes); 818 819 ret = psp_cmd_submit_buf(psp, NULL, cmd, 820 psp->fence_buf_mc_addr); 821 if (!ret) 822 *tmr_size = psp->cmd_buf_mem->resp.tmr_size; 823 824 release_psp_cmd_buf(psp); 825 826 return ret; 827 } 828 829 /* Set up Trusted Memory Region */ 830 static int psp_tmr_init(struct psp_context *psp) 831 { 832 int ret = 0; 833 int tmr_size; 834 void *tmr_buf; 835 void **pptr; 836 837 /* 838 * According to HW engineer, they prefer the TMR address be "naturally 839 * aligned" , e.g. the start address be an integer divide of TMR size. 840 * 841 * Note: this memory need be reserved till the driver 842 * uninitializes. 843 */ 844 tmr_size = PSP_TMR_SIZE(psp->adev); 845 846 /* For ASICs support RLC autoload, psp will parse the toc 847 * and calculate the total size of TMR needed 848 */ 849 if (!amdgpu_sriov_vf(psp->adev) && 850 psp->toc.start_addr && 851 psp->toc.size_bytes && 852 psp->fw_pri_buf) { 853 ret = psp_load_toc(psp, &tmr_size); 854 if (ret) { 855 dev_err(psp->adev->dev, "Failed to load toc\n"); 856 return ret; 857 } 858 } 859 860 if (!psp->tmr_bo && !psp->boot_time_tmr) { 861 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; 862 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, 863 PSP_TMR_ALIGNMENT, 864 AMDGPU_HAS_VRAM(psp->adev) ? 865 AMDGPU_GEM_DOMAIN_VRAM : 866 AMDGPU_GEM_DOMAIN_GTT, 867 &psp->tmr_bo, &psp->tmr_mc_addr, 868 pptr); 869 } 870 871 return ret; 872 } 873 874 static bool psp_skip_tmr(struct psp_context *psp) 875 { 876 switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) { 877 case IP_VERSION(11, 0, 9): 878 case IP_VERSION(11, 0, 7): 879 case IP_VERSION(13, 0, 2): 880 case IP_VERSION(13, 0, 6): 881 case IP_VERSION(13, 0, 10): 882 case IP_VERSION(13, 0, 12): 883 case IP_VERSION(13, 0, 14): 884 return true; 885 default: 886 return false; 887 } 888 } 889 890 static int psp_tmr_load(struct psp_context *psp) 891 { 892 int ret; 893 struct psp_gfx_cmd_resp *cmd; 894 895 /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR. 896 * Already set up by host driver. 
897 */ 898 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp)) 899 return 0; 900 901 cmd = acquire_psp_cmd_buf(psp); 902 903 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo); 904 if (psp->tmr_bo) 905 dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n", 906 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr); 907 908 ret = psp_cmd_submit_buf(psp, NULL, cmd, 909 psp->fence_buf_mc_addr); 910 911 release_psp_cmd_buf(psp); 912 913 return ret; 914 } 915 916 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp, 917 struct psp_gfx_cmd_resp *cmd) 918 { 919 if (amdgpu_sriov_vf(psp->adev)) 920 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR; 921 else 922 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR; 923 } 924 925 static int psp_tmr_unload(struct psp_context *psp) 926 { 927 int ret; 928 struct psp_gfx_cmd_resp *cmd; 929 930 /* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV, 931 * as TMR is not loaded at all 932 */ 933 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp)) 934 return 0; 935 936 cmd = acquire_psp_cmd_buf(psp); 937 938 psp_prep_tmr_unload_cmd_buf(psp, cmd); 939 dev_dbg(psp->adev->dev, "free PSP TMR buffer\n"); 940 941 ret = psp_cmd_submit_buf(psp, NULL, cmd, 942 psp->fence_buf_mc_addr); 943 944 release_psp_cmd_buf(psp); 945 946 return ret; 947 } 948 949 static int psp_tmr_terminate(struct psp_context *psp) 950 { 951 return psp_tmr_unload(psp); 952 } 953 954 int psp_get_fw_attestation_records_addr(struct psp_context *psp, 955 uint64_t *output_ptr) 956 { 957 int ret; 958 struct psp_gfx_cmd_resp *cmd; 959 960 if (!output_ptr) 961 return -EINVAL; 962 963 if (amdgpu_sriov_vf(psp->adev)) 964 return 0; 965 966 cmd = acquire_psp_cmd_buf(psp); 967 968 cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION; 969 970 ret = psp_cmd_submit_buf(psp, NULL, cmd, 971 psp->fence_buf_mc_addr); 972 973 if (!ret) { 974 *output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) + 975 ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32); 976 } 977 978 release_psp_cmd_buf(psp); 979 980 return ret; 981 } 982 983 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg) 984 { 985 struct psp_context *psp = &adev->psp; 986 struct psp_gfx_cmd_resp *cmd; 987 int ret; 988 989 if (amdgpu_sriov_vf(adev)) 990 return 0; 991 992 cmd = acquire_psp_cmd_buf(psp); 993 994 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG; 995 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET; 996 997 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 998 if (!ret) { 999 *boot_cfg = 1000 (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 
1 : 0; 1001 } 1002 1003 release_psp_cmd_buf(psp); 1004 1005 return ret; 1006 } 1007 1008 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg) 1009 { 1010 int ret; 1011 struct psp_context *psp = &adev->psp; 1012 struct psp_gfx_cmd_resp *cmd; 1013 1014 if (amdgpu_sriov_vf(adev)) 1015 return 0; 1016 1017 cmd = acquire_psp_cmd_buf(psp); 1018 1019 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG; 1020 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET; 1021 cmd->cmd.boot_cfg.boot_config = boot_cfg; 1022 cmd->cmd.boot_cfg.boot_config_valid = boot_cfg; 1023 1024 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1025 1026 release_psp_cmd_buf(psp); 1027 1028 return ret; 1029 } 1030 1031 static int psp_rl_load(struct amdgpu_device *adev) 1032 { 1033 int ret; 1034 struct psp_context *psp = &adev->psp; 1035 struct psp_gfx_cmd_resp *cmd; 1036 1037 if (!is_psp_fw_valid(psp->rl)) 1038 return 0; 1039 1040 cmd = acquire_psp_cmd_buf(psp); 1041 1042 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 1043 memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes); 1044 1045 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 1046 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr); 1047 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr); 1048 cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes; 1049 cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST; 1050 1051 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1052 1053 release_psp_cmd_buf(psp); 1054 1055 return ret; 1056 } 1057 1058 int psp_memory_partition(struct psp_context *psp, int mode) 1059 { 1060 struct psp_gfx_cmd_resp *cmd; 1061 int ret; 1062 1063 if (amdgpu_sriov_vf(psp->adev)) 1064 return 0; 1065 1066 cmd = acquire_psp_cmd_buf(psp); 1067 1068 cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE; 1069 cmd->cmd.cmd_memory_part.mode = mode; 1070 1071 dev_info(psp->adev->dev, 1072 "Requesting %d memory partition change through PSP", mode); 1073 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1074 if (ret) 1075 dev_err(psp->adev->dev, 1076 "PSP request failed to change to NPS%d mode\n", mode); 1077 1078 release_psp_cmd_buf(psp); 1079 1080 return ret; 1081 } 1082 1083 int psp_spatial_partition(struct psp_context *psp, int mode) 1084 { 1085 struct psp_gfx_cmd_resp *cmd; 1086 int ret; 1087 1088 if (amdgpu_sriov_vf(psp->adev)) 1089 return 0; 1090 1091 cmd = acquire_psp_cmd_buf(psp); 1092 1093 cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART; 1094 cmd->cmd.cmd_spatial_part.mode = mode; 1095 1096 dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode); 1097 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1098 1099 release_psp_cmd_buf(psp); 1100 1101 return ret; 1102 } 1103 1104 static int psp_asd_initialize(struct psp_context *psp) 1105 { 1106 int ret; 1107 1108 /* If PSP version doesn't match ASD version, asd loading will be failed. 1109 * add workaround to bypass it for sriov now. 
1110 * TODO: add version check to make it common 1111 */ 1112 if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes) 1113 return 0; 1114 1115 /* bypass asd if display hardware is not available */ 1116 if (!amdgpu_device_has_display_hardware(psp->adev) && 1117 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10)) 1118 return 0; 1119 1120 psp->asd_context.mem_context.shared_mc_addr = 0; 1121 psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE; 1122 psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD; 1123 1124 ret = psp_ta_load(psp, &psp->asd_context); 1125 if (!ret) 1126 psp->asd_context.initialized = true; 1127 1128 return ret; 1129 } 1130 1131 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1132 uint32_t session_id) 1133 { 1134 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; 1135 cmd->cmd.cmd_unload_ta.session_id = session_id; 1136 } 1137 1138 int psp_ta_unload(struct psp_context *psp, struct ta_context *context) 1139 { 1140 int ret; 1141 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 1142 1143 psp_prep_ta_unload_cmd_buf(cmd, context->session_id); 1144 1145 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1146 1147 context->resp_status = cmd->resp.status; 1148 1149 release_psp_cmd_buf(psp); 1150 1151 return ret; 1152 } 1153 1154 static int psp_asd_terminate(struct psp_context *psp) 1155 { 1156 int ret; 1157 1158 if (amdgpu_sriov_vf(psp->adev)) 1159 return 0; 1160 1161 if (!psp->asd_context.initialized) 1162 return 0; 1163 1164 ret = psp_ta_unload(psp, &psp->asd_context); 1165 if (!ret) 1166 psp->asd_context.initialized = false; 1167 1168 return ret; 1169 } 1170 1171 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1172 uint32_t id, uint32_t value) 1173 { 1174 cmd->cmd_id = GFX_CMD_ID_PROG_REG; 1175 cmd->cmd.cmd_setup_reg_prog.reg_value = value; 1176 cmd->cmd.cmd_setup_reg_prog.reg_id = id; 1177 } 1178 1179 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg, 1180 uint32_t value) 1181 { 1182 struct psp_gfx_cmd_resp *cmd; 1183 int ret = 0; 1184 1185 if (reg >= PSP_REG_LAST) 1186 return -EINVAL; 1187 1188 cmd = acquire_psp_cmd_buf(psp); 1189 1190 psp_prep_reg_prog_cmd_buf(cmd, reg, value); 1191 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1192 if (ret) 1193 dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg); 1194 1195 release_psp_cmd_buf(psp); 1196 1197 return ret; 1198 } 1199 1200 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1201 uint64_t ta_bin_mc, 1202 struct ta_context *context) 1203 { 1204 cmd->cmd_id = context->ta_load_type; 1205 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc); 1206 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc); 1207 cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes; 1208 1209 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 1210 lower_32_bits(context->mem_context.shared_mc_addr); 1211 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 1212 upper_32_bits(context->mem_context.shared_mc_addr); 1213 cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size; 1214 } 1215 1216 int psp_ta_init_shared_buf(struct psp_context *psp, 1217 struct ta_mem_context *mem_ctx) 1218 { 1219 /* 1220 * Allocate 16k memory aligned to 4k from Frame Buffer (local 1221 * physical) for ta to host memory 1222 */ 1223 return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size, 1224 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | 1225 AMDGPU_GEM_DOMAIN_GTT, 1226 
&mem_ctx->shared_bo, 1227 &mem_ctx->shared_mc_addr, 1228 &mem_ctx->shared_buf); 1229 } 1230 1231 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1232 uint32_t ta_cmd_id, 1233 uint32_t session_id) 1234 { 1235 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; 1236 cmd->cmd.cmd_invoke_cmd.session_id = session_id; 1237 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; 1238 } 1239 1240 int psp_ta_invoke(struct psp_context *psp, 1241 uint32_t ta_cmd_id, 1242 struct ta_context *context) 1243 { 1244 int ret; 1245 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 1246 1247 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id); 1248 1249 ret = psp_cmd_submit_buf(psp, NULL, cmd, 1250 psp->fence_buf_mc_addr); 1251 1252 context->resp_status = cmd->resp.status; 1253 1254 release_psp_cmd_buf(psp); 1255 1256 return ret; 1257 } 1258 1259 int psp_ta_load(struct psp_context *psp, struct ta_context *context) 1260 { 1261 int ret; 1262 struct psp_gfx_cmd_resp *cmd; 1263 1264 cmd = acquire_psp_cmd_buf(psp); 1265 1266 psp_copy_fw(psp, context->bin_desc.start_addr, 1267 context->bin_desc.size_bytes); 1268 1269 psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context); 1270 1271 ret = psp_cmd_submit_buf(psp, NULL, cmd, 1272 psp->fence_buf_mc_addr); 1273 1274 context->resp_status = cmd->resp.status; 1275 1276 if (!ret) 1277 context->session_id = cmd->resp.session_id; 1278 1279 release_psp_cmd_buf(psp); 1280 1281 return ret; 1282 } 1283 1284 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1285 { 1286 return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context); 1287 } 1288 1289 int psp_xgmi_terminate(struct psp_context *psp) 1290 { 1291 int ret; 1292 struct amdgpu_device *adev = psp->adev; 1293 1294 /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */ 1295 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) || 1296 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) && 1297 adev->gmc.xgmi.connected_to_cpu)) 1298 return 0; 1299 1300 if (!psp->xgmi_context.context.initialized) 1301 return 0; 1302 1303 ret = psp_ta_unload(psp, &psp->xgmi_context.context); 1304 1305 psp->xgmi_context.context.initialized = false; 1306 1307 return ret; 1308 } 1309 1310 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta) 1311 { 1312 struct ta_xgmi_shared_memory *xgmi_cmd; 1313 int ret; 1314 1315 if (!psp->ta_fw || 1316 !psp->xgmi_context.context.bin_desc.size_bytes || 1317 !psp->xgmi_context.context.bin_desc.start_addr) 1318 return -ENOENT; 1319 1320 if (!load_ta) 1321 goto invoke; 1322 1323 psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE; 1324 psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1325 1326 if (!psp->xgmi_context.context.mem_context.shared_buf) { 1327 ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context); 1328 if (ret) 1329 return ret; 1330 } 1331 1332 /* Load XGMI TA */ 1333 ret = psp_ta_load(psp, &psp->xgmi_context.context); 1334 if (!ret) 1335 psp->xgmi_context.context.initialized = true; 1336 else 1337 return ret; 1338 1339 invoke: 1340 /* Initialize XGMI session */ 1341 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf); 1342 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1343 xgmi_cmd->flag_extend_link_record = set_extended_data; 1344 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE; 1345 1346 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1347 /* note down the capbility flag for XGMI 
TA */ 1348 psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag; 1349 1350 return ret; 1351 } 1352 1353 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id) 1354 { 1355 struct ta_xgmi_shared_memory *xgmi_cmd; 1356 int ret; 1357 1358 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1359 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1360 1361 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID; 1362 1363 /* Invoke xgmi ta to get hive id */ 1364 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1365 if (ret) 1366 return ret; 1367 1368 *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id; 1369 1370 return 0; 1371 } 1372 1373 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id) 1374 { 1375 struct ta_xgmi_shared_memory *xgmi_cmd; 1376 int ret; 1377 1378 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1379 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1380 1381 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID; 1382 1383 /* Invoke xgmi ta to get the node id */ 1384 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1385 if (ret) 1386 return ret; 1387 1388 *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id; 1389 1390 return 0; 1391 } 1392 1393 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp) 1394 { 1395 return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == 1396 IP_VERSION(13, 0, 2) && 1397 psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) || 1398 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= 1399 IP_VERSION(13, 0, 6); 1400 } 1401 1402 /* 1403 * Chips that support extended topology information require the driver to 1404 * reflect topology information in the opposite direction. This is 1405 * because the TA has already exceeded its link record limit and if the 1406 * TA holds bi-directional information, the driver would have to do 1407 * multiple fetches instead of just two. 1408 */ 1409 static void psp_xgmi_reflect_topology_info(struct psp_context *psp, 1410 struct psp_xgmi_node_info node_info) 1411 { 1412 struct amdgpu_device *mirror_adev; 1413 struct amdgpu_hive_info *hive; 1414 uint64_t src_node_id = psp->adev->gmc.xgmi.node_id; 1415 uint64_t dst_node_id = node_info.node_id; 1416 uint8_t dst_num_hops = node_info.num_hops; 1417 uint8_t dst_num_links = node_info.num_links; 1418 1419 hive = amdgpu_get_xgmi_hive(psp->adev); 1420 if (WARN_ON(!hive)) 1421 return; 1422 1423 list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) { 1424 struct psp_xgmi_topology_info *mirror_top_info; 1425 int j; 1426 1427 if (mirror_adev->gmc.xgmi.node_id != dst_node_id) 1428 continue; 1429 1430 mirror_top_info = &mirror_adev->psp.xgmi_context.top_info; 1431 for (j = 0; j < mirror_top_info->num_nodes; j++) { 1432 if (mirror_top_info->nodes[j].node_id != src_node_id) 1433 continue; 1434 1435 mirror_top_info->nodes[j].num_hops = dst_num_hops; 1436 /* 1437 * prevent 0 num_links value re-reflection since reflection 1438 * criteria is based on num_hops (direct or indirect). 
1439 * 1440 */ 1441 if (dst_num_links) 1442 mirror_top_info->nodes[j].num_links = dst_num_links; 1443 1444 break; 1445 } 1446 1447 break; 1448 } 1449 1450 amdgpu_put_xgmi_hive(hive); 1451 } 1452 1453 int psp_xgmi_get_topology_info(struct psp_context *psp, 1454 int number_devices, 1455 struct psp_xgmi_topology_info *topology, 1456 bool get_extended_data) 1457 { 1458 struct ta_xgmi_shared_memory *xgmi_cmd; 1459 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; 1460 struct ta_xgmi_cmd_get_topology_info_output *topology_info_output; 1461 int i; 1462 int ret; 1463 1464 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) 1465 return -EINVAL; 1466 1467 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1468 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1469 xgmi_cmd->flag_extend_link_record = get_extended_data; 1470 1471 /* Fill in the shared memory with topology information as input */ 1472 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; 1473 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO; 1474 topology_info_input->num_nodes = number_devices; 1475 1476 for (i = 0; i < topology_info_input->num_nodes; i++) { 1477 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; 1478 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; 1479 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled; 1480 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; 1481 } 1482 1483 /* Invoke xgmi ta to get the topology information */ 1484 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO); 1485 if (ret) 1486 return ret; 1487 1488 /* Read the output topology information from the shared memory */ 1489 topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info; 1490 topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes; 1491 for (i = 0; i < topology->num_nodes; i++) { 1492 /* extended data will either be 0 or equal to non-extended data */ 1493 if (topology_info_output->nodes[i].num_hops) 1494 topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops; 1495 1496 /* non-extended data gets everything here so no need to update */ 1497 if (!get_extended_data) { 1498 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id; 1499 topology->nodes[i].is_sharing_enabled = 1500 topology_info_output->nodes[i].is_sharing_enabled; 1501 topology->nodes[i].sdma_engine = 1502 topology_info_output->nodes[i].sdma_engine; 1503 } 1504 1505 } 1506 1507 /* Invoke xgmi ta again to get the link information */ 1508 if (psp_xgmi_peer_link_info_supported(psp)) { 1509 struct ta_xgmi_cmd_get_peer_link_info *link_info_output; 1510 struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output; 1511 bool requires_reflection = 1512 (psp->xgmi_context.supports_extended_data && 1513 get_extended_data) || 1514 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == 1515 IP_VERSION(13, 0, 6) || 1516 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == 1517 IP_VERSION(13, 0, 14); 1518 bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 : 1519 psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG; 1520 1521 /* popluate the shared output buffer rather than the cmd input buffer 1522 * with node_ids as the input for GET_PEER_LINKS command execution. 1523 * This is required for GET_PEER_LINKS per xgmi ta implementation. 1524 * The same requirement for GET_EXTEND_PEER_LINKS command. 
1525 */ 1526 if (ta_port_num_support) { 1527 link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info; 1528 1529 for (i = 0; i < topology->num_nodes; i++) 1530 link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id; 1531 1532 link_extend_info_output->num_nodes = topology->num_nodes; 1533 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS; 1534 } else { 1535 link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info; 1536 1537 for (i = 0; i < topology->num_nodes; i++) 1538 link_info_output->nodes[i].node_id = topology->nodes[i].node_id; 1539 1540 link_info_output->num_nodes = topology->num_nodes; 1541 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS; 1542 } 1543 1544 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1545 if (ret) 1546 return ret; 1547 1548 for (i = 0; i < topology->num_nodes; i++) { 1549 uint8_t node_num_links = ta_port_num_support ? 1550 link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links; 1551 /* accumulate num_links on extended data */ 1552 if (get_extended_data) { 1553 topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links; 1554 } else { 1555 topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ? 1556 topology->nodes[i].num_links : node_num_links; 1557 } 1558 /* popluate the connected port num info if supported and available */ 1559 if (ta_port_num_support && topology->nodes[i].num_links) { 1560 memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num, 1561 sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM); 1562 } 1563 1564 /* reflect the topology information for bi-directionality */ 1565 if (requires_reflection && topology->nodes[i].num_hops) 1566 psp_xgmi_reflect_topology_info(psp, topology->nodes[i]); 1567 } 1568 } 1569 1570 return 0; 1571 } 1572 1573 int psp_xgmi_set_topology_info(struct psp_context *psp, 1574 int number_devices, 1575 struct psp_xgmi_topology_info *topology) 1576 { 1577 struct ta_xgmi_shared_memory *xgmi_cmd; 1578 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; 1579 int i; 1580 1581 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) 1582 return -EINVAL; 1583 1584 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1585 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1586 1587 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; 1588 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO; 1589 topology_info_input->num_nodes = number_devices; 1590 1591 for (i = 0; i < topology_info_input->num_nodes; i++) { 1592 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; 1593 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; 1594 topology_info_input->nodes[i].is_sharing_enabled = 1; 1595 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; 1596 } 1597 1598 /* Invoke xgmi ta to set topology information */ 1599 return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO); 1600 } 1601 1602 // ras begin 1603 static void psp_ras_ta_check_status(struct psp_context *psp) 1604 { 1605 struct ta_ras_shared_memory *ras_cmd = 1606 (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1607 1608 switch (ras_cmd->ras_status) { 1609 case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP: 1610 dev_warn(psp->adev->dev, 1611 "RAS WARNING: cmd failed due to unsupported ip\n"); 1612 break; 1613 case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ: 1614 
dev_warn(psp->adev->dev, 1615 "RAS WARNING: cmd failed due to unsupported error injection\n"); 1616 break; 1617 case TA_RAS_STATUS__SUCCESS: 1618 break; 1619 case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED: 1620 if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR) 1621 dev_warn(psp->adev->dev, 1622 "RAS WARNING: Inject error to critical region is not allowed\n"); 1623 break; 1624 default: 1625 dev_warn(psp->adev->dev, 1626 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status); 1627 break; 1628 } 1629 } 1630 1631 static int psp_ras_send_cmd(struct psp_context *psp, 1632 enum ras_command cmd_id, void *in, void *out) 1633 { 1634 struct ta_ras_shared_memory *ras_cmd; 1635 uint32_t cmd = cmd_id; 1636 int ret = 0; 1637 1638 if (!in) 1639 return -EINVAL; 1640 1641 mutex_lock(&psp->ras_context.mutex); 1642 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1643 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1644 1645 switch (cmd) { 1646 case TA_RAS_COMMAND__ENABLE_FEATURES: 1647 case TA_RAS_COMMAND__DISABLE_FEATURES: 1648 memcpy(&ras_cmd->ras_in_message, 1649 in, sizeof(ras_cmd->ras_in_message)); 1650 break; 1651 case TA_RAS_COMMAND__TRIGGER_ERROR: 1652 memcpy(&ras_cmd->ras_in_message.trigger_error, 1653 in, sizeof(ras_cmd->ras_in_message.trigger_error)); 1654 break; 1655 case TA_RAS_COMMAND__QUERY_ADDRESS: 1656 memcpy(&ras_cmd->ras_in_message.address, 1657 in, sizeof(ras_cmd->ras_in_message.address)); 1658 break; 1659 default: 1660 dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd); 1661 ret = -EINVAL; 1662 goto err_out; 1663 } 1664 1665 ras_cmd->cmd_id = cmd; 1666 ret = psp_ras_invoke(psp, ras_cmd->cmd_id); 1667 1668 switch (cmd) { 1669 case TA_RAS_COMMAND__TRIGGER_ERROR: 1670 if (!ret && out) 1671 memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status)); 1672 break; 1673 case TA_RAS_COMMAND__QUERY_ADDRESS: 1674 if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status) 1675 ret = -EINVAL; 1676 else if (out) 1677 memcpy(out, 1678 &ras_cmd->ras_out_message.address, 1679 sizeof(ras_cmd->ras_out_message.address)); 1680 break; 1681 default: 1682 break; 1683 } 1684 1685 err_out: 1686 mutex_unlock(&psp->ras_context.mutex); 1687 1688 return ret; 1689 } 1690 1691 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1692 { 1693 struct ta_ras_shared_memory *ras_cmd; 1694 int ret; 1695 1696 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1697 1698 /* 1699 * TODO: bypass the loading in sriov for now 1700 */ 1701 if (amdgpu_sriov_vf(psp->adev)) 1702 return 0; 1703 1704 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context); 1705 1706 if (amdgpu_ras_intr_triggered()) 1707 return ret; 1708 1709 if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) { 1710 dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n"); 1711 return -EINVAL; 1712 } 1713 1714 if (!ret) { 1715 if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) { 1716 dev_warn(psp->adev->dev, "ECC switch disabled\n"); 1717 1718 ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE; 1719 } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag) 1720 dev_warn(psp->adev->dev, 1721 "RAS internal register access blocked\n"); 1722 1723 psp_ras_ta_check_status(psp); 1724 } 1725 1726 return ret; 1727 } 1728 1729 int psp_ras_enable_features(struct psp_context *psp, 1730 union ta_ras_cmd_input *info, bool enable) 1731 { 1732 enum ras_command cmd_id; 1733 int ret; 1734 1735 if (!psp->ras_context.context.initialized || 
!info) 1736 return -EINVAL; 1737 1738 cmd_id = enable ? 1739 TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES; 1740 ret = psp_ras_send_cmd(psp, cmd_id, info, NULL); 1741 if (ret) 1742 return -EINVAL; 1743 1744 return 0; 1745 } 1746 1747 int psp_ras_terminate(struct psp_context *psp) 1748 { 1749 int ret; 1750 1751 /* 1752 * TODO: bypass the terminate in sriov for now 1753 */ 1754 if (amdgpu_sriov_vf(psp->adev)) 1755 return 0; 1756 1757 if (!psp->ras_context.context.initialized) 1758 return 0; 1759 1760 ret = psp_ta_unload(psp, &psp->ras_context.context); 1761 1762 psp->ras_context.context.initialized = false; 1763 1764 mutex_destroy(&psp->ras_context.mutex); 1765 1766 return ret; 1767 } 1768 1769 int psp_ras_initialize(struct psp_context *psp) 1770 { 1771 int ret; 1772 uint32_t boot_cfg = 0xFF; 1773 struct amdgpu_device *adev = psp->adev; 1774 struct ta_ras_shared_memory *ras_cmd; 1775 1776 /* 1777 * TODO: bypass the initialize in sriov for now 1778 */ 1779 if (amdgpu_sriov_vf(adev)) 1780 return 0; 1781 1782 if (!adev->psp.ras_context.context.bin_desc.size_bytes || 1783 !adev->psp.ras_context.context.bin_desc.start_addr) { 1784 dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n"); 1785 return 0; 1786 } 1787 1788 if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) { 1789 /* query GECC enablement status from boot config 1790 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled 1791 */ 1792 ret = psp_boot_config_get(adev, &boot_cfg); 1793 if (ret) 1794 dev_warn(adev->dev, "PSP get boot config failed\n"); 1795 1796 if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) { 1797 if (!boot_cfg) { 1798 dev_info(adev->dev, "GECC is disabled\n"); 1799 } else { 1800 /* disable GECC in next boot cycle if ras is 1801 * disabled by module parameter amdgpu_ras_enable 1802 * and/or amdgpu_ras_mask, or boot_config_get call 1803 * is failed 1804 */ 1805 ret = psp_boot_config_set(adev, 0); 1806 if (ret) 1807 dev_warn(adev->dev, "PSP set boot config failed\n"); 1808 else 1809 dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n"); 1810 } 1811 } else { 1812 if (boot_cfg == 1) { 1813 dev_info(adev->dev, "GECC is enabled\n"); 1814 } else { 1815 /* enable GECC in next boot cycle if it is disabled 1816 * in boot config, or force enable GECC if failed to 1817 * get boot configuration 1818 */ 1819 ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC); 1820 if (ret) 1821 dev_warn(adev->dev, "PSP set boot config failed\n"); 1822 else 1823 dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n"); 1824 } 1825 } 1826 } 1827 1828 psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE; 1829 psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1830 1831 if (!psp->ras_context.context.mem_context.shared_buf) { 1832 ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context); 1833 if (ret) 1834 return ret; 1835 } 1836 1837 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1838 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1839 1840 if (amdgpu_ras_is_poison_mode_supported(adev)) 1841 ras_cmd->ras_in_message.init_flags.poison_mode_en = 1; 1842 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) 1843 ras_cmd->ras_in_message.init_flags.dgpu_mode = 1; 1844 ras_cmd->ras_in_message.init_flags.xcc_mask = 1845 adev->gfx.xcc_mask; 1846 ras_cmd->ras_in_message.init_flags.channel_dis_num = 
hweight32(adev->gmc.m_half_use) * 2; 1847 if (adev->gmc.gmc_funcs->query_mem_partition_mode) 1848 ras_cmd->ras_in_message.init_flags.nps_mode = 1849 adev->gmc.gmc_funcs->query_mem_partition_mode(adev); 1850 1851 ret = psp_ta_load(psp, &psp->ras_context.context); 1852 1853 if (!ret && !ras_cmd->ras_status) { 1854 psp->ras_context.context.initialized = true; 1855 mutex_init(&psp->ras_context.mutex); 1856 } else { 1857 if (ras_cmd->ras_status) 1858 dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status); 1859 1860 /* fail to load RAS TA */ 1861 psp->ras_context.context.initialized = false; 1862 } 1863 1864 return ret; 1865 } 1866 1867 int psp_ras_trigger_error(struct psp_context *psp, 1868 struct ta_ras_trigger_error_input *info, uint32_t instance_mask) 1869 { 1870 struct amdgpu_device *adev = psp->adev; 1871 int ret; 1872 uint32_t dev_mask; 1873 uint32_t ras_status = 0; 1874 1875 if (!psp->ras_context.context.initialized || !info) 1876 return -EINVAL; 1877 1878 switch (info->block_id) { 1879 case TA_RAS_BLOCK__GFX: 1880 dev_mask = GET_MASK(GC, instance_mask); 1881 break; 1882 case TA_RAS_BLOCK__SDMA: 1883 dev_mask = GET_MASK(SDMA0, instance_mask); 1884 break; 1885 case TA_RAS_BLOCK__VCN: 1886 case TA_RAS_BLOCK__JPEG: 1887 dev_mask = GET_MASK(VCN, instance_mask); 1888 break; 1889 default: 1890 dev_mask = instance_mask; 1891 break; 1892 } 1893 1894 /* reuse sub_block_index for backward compatibility */ 1895 dev_mask <<= AMDGPU_RAS_INST_SHIFT; 1896 dev_mask &= AMDGPU_RAS_INST_MASK; 1897 info->sub_block_index |= dev_mask; 1898 1899 ret = psp_ras_send_cmd(psp, 1900 TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status); 1901 if (ret) 1902 return -EINVAL; 1903 1904 /* If err_event_athub occurs, the error injection was successful, however 1905 * the return status from the TA is no longer reliable 1906 */ 1907 if (amdgpu_ras_intr_triggered()) 1908 return 0; 1909 1910 if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED) 1911 return -EACCES; 1912 else if (ras_status) 1913 return -EINVAL; 1914 1915 return 0; 1916 } 1917 1918 int psp_ras_query_address(struct psp_context *psp, 1919 struct ta_ras_query_address_input *addr_in, 1920 struct ta_ras_query_address_output *addr_out) 1921 { 1922 int ret; 1923 1924 if (!psp->ras_context.context.initialized || 1925 !addr_in || !addr_out) 1926 return -EINVAL; 1927 1928 ret = psp_ras_send_cmd(psp, 1929 TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out); 1930 1931 return ret; 1932 } 1933 // ras end 1934 1935 // HDCP start 1936 static int psp_hdcp_initialize(struct psp_context *psp) 1937 { 1938 int ret; 1939 1940 /* 1941 * TODO: bypass the initialize in sriov for now 1942 */ 1943 if (amdgpu_sriov_vf(psp->adev)) 1944 return 0; 1945 1946 /* bypass hdcp initialization if dmu is harvested */ 1947 if (!amdgpu_device_has_display_hardware(psp->adev)) 1948 return 0; 1949 1950 if (!psp->hdcp_context.context.bin_desc.size_bytes || 1951 !psp->hdcp_context.context.bin_desc.start_addr) { 1952 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n"); 1953 return 0; 1954 } 1955 1956 psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE; 1957 psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1958 1959 if (!psp->hdcp_context.context.mem_context.shared_buf) { 1960 ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context); 1961 if (ret) 1962 return ret; 1963 } 1964 1965 ret = psp_ta_load(psp, &psp->hdcp_context.context); 1966 if (!ret) { 1967 psp->hdcp_context.context.initialized = true; 1968
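/* HDCP TA loaded successfully; the context mutex set up below guards later HDCP TA commands */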
mutex_init(&psp->hdcp_context.mutex); 1969 } 1970 1971 return ret; 1972 } 1973 1974 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1975 { 1976 /* 1977 * TODO: bypass the loading in sriov for now 1978 */ 1979 if (amdgpu_sriov_vf(psp->adev)) 1980 return 0; 1981 1982 if (!psp->hdcp_context.context.initialized) 1983 return 0; 1984 1985 return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context); 1986 } 1987 1988 static int psp_hdcp_terminate(struct psp_context *psp) 1989 { 1990 int ret; 1991 1992 /* 1993 * TODO: bypass the terminate in sriov for now 1994 */ 1995 if (amdgpu_sriov_vf(psp->adev)) 1996 return 0; 1997 1998 if (!psp->hdcp_context.context.initialized) 1999 return 0; 2000 2001 ret = psp_ta_unload(psp, &psp->hdcp_context.context); 2002 2003 psp->hdcp_context.context.initialized = false; 2004 2005 return ret; 2006 } 2007 // HDCP end 2008 2009 // DTM start 2010 static int psp_dtm_initialize(struct psp_context *psp) 2011 { 2012 int ret; 2013 2014 /* 2015 * TODO: bypass the initialize in sriov for now 2016 */ 2017 if (amdgpu_sriov_vf(psp->adev)) 2018 return 0; 2019 2020 /* bypass dtm initialization if dmu is harvested */ 2021 if (!amdgpu_device_has_display_hardware(psp->adev)) 2022 return 0; 2023 2024 if (!psp->dtm_context.context.bin_desc.size_bytes || 2025 !psp->dtm_context.context.bin_desc.start_addr) { 2026 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n"); 2027 return 0; 2028 } 2029 2030 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE; 2031 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2032 2033 if (!psp->dtm_context.context.mem_context.shared_buf) { 2034 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context); 2035 if (ret) 2036 return ret; 2037 } 2038 2039 ret = psp_ta_load(psp, &psp->dtm_context.context); 2040 if (!ret) { 2041 psp->dtm_context.context.initialized = true; 2042 mutex_init(&psp->dtm_context.mutex); 2043 } 2044 2045 return ret; 2046 } 2047 2048 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2049 { 2050 /* 2051 * TODO: bypass the loading in sriov for now 2052 */ 2053 if (amdgpu_sriov_vf(psp->adev)) 2054 return 0; 2055 2056 if (!psp->dtm_context.context.initialized) 2057 return 0; 2058 2059 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context); 2060 } 2061 2062 static int psp_dtm_terminate(struct psp_context *psp) 2063 { 2064 int ret; 2065 2066 /* 2067 * TODO: bypass the terminate in sriov for now 2068 */ 2069 if (amdgpu_sriov_vf(psp->adev)) 2070 return 0; 2071 2072 if (!psp->dtm_context.context.initialized) 2073 return 0; 2074 2075 ret = psp_ta_unload(psp, &psp->dtm_context.context); 2076 2077 psp->dtm_context.context.initialized = false; 2078 2079 return ret; 2080 } 2081 // DTM end 2082 2083 // RAP start 2084 static int psp_rap_initialize(struct psp_context *psp) 2085 { 2086 int ret; 2087 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS; 2088 2089 /* 2090 * TODO: bypass the initialize in sriov for now 2091 */ 2092 if (amdgpu_sriov_vf(psp->adev)) 2093 return 0; 2094 2095 if (!psp->rap_context.context.bin_desc.size_bytes || 2096 !psp->rap_context.context.bin_desc.start_addr) { 2097 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n"); 2098 return 0; 2099 } 2100 2101 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE; 2102 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2103 2104 if (!psp->rap_context.context.mem_context.shared_buf) { 2105 ret = psp_ta_init_shared_buf(psp, 
&psp->rap_context.context.mem_context); 2106 if (ret) 2107 return ret; 2108 } 2109 2110 ret = psp_ta_load(psp, &psp->rap_context.context); 2111 if (!ret) { 2112 psp->rap_context.context.initialized = true; 2113 mutex_init(&psp->rap_context.mutex); 2114 } else 2115 return ret; 2116 2117 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status); 2118 if (ret || status != TA_RAP_STATUS__SUCCESS) { 2119 psp_rap_terminate(psp); 2120 /* free rap shared memory */ 2121 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); 2122 2123 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n", 2124 ret, status); 2125 2126 return ret; 2127 } 2128 2129 return 0; 2130 } 2131 2132 static int psp_rap_terminate(struct psp_context *psp) 2133 { 2134 int ret; 2135 2136 if (!psp->rap_context.context.initialized) 2137 return 0; 2138 2139 ret = psp_ta_unload(psp, &psp->rap_context.context); 2140 2141 psp->rap_context.context.initialized = false; 2142 2143 return ret; 2144 } 2145 2146 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status) 2147 { 2148 struct ta_rap_shared_memory *rap_cmd; 2149 int ret = 0; 2150 2151 if (!psp->rap_context.context.initialized) 2152 return 0; 2153 2154 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE && 2155 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0) 2156 return -EINVAL; 2157 2158 mutex_lock(&psp->rap_context.mutex); 2159 2160 rap_cmd = (struct ta_rap_shared_memory *) 2161 psp->rap_context.context.mem_context.shared_buf; 2162 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory)); 2163 2164 rap_cmd->cmd_id = ta_cmd_id; 2165 rap_cmd->validation_method_id = METHOD_A; 2166 2167 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context); 2168 if (ret) 2169 goto out_unlock; 2170 2171 if (status) 2172 *status = rap_cmd->rap_status; 2173 2174 out_unlock: 2175 mutex_unlock(&psp->rap_context.mutex); 2176 2177 return ret; 2178 } 2179 // RAP end 2180 2181 /* securedisplay start */ 2182 static int psp_securedisplay_initialize(struct psp_context *psp) 2183 { 2184 int ret; 2185 struct ta_securedisplay_cmd *securedisplay_cmd; 2186 2187 /* 2188 * TODO: bypass the initialize in sriov for now 2189 */ 2190 if (amdgpu_sriov_vf(psp->adev)) 2191 return 0; 2192 2193 /* bypass securedisplay initialization if dmu is harvested */ 2194 if (!amdgpu_device_has_display_hardware(psp->adev)) 2195 return 0; 2196 2197 if (!psp->securedisplay_context.context.bin_desc.size_bytes || 2198 !psp->securedisplay_context.context.bin_desc.start_addr) { 2199 dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n"); 2200 return 0; 2201 } 2202 2203 psp->securedisplay_context.context.mem_context.shared_mem_size = 2204 PSP_SECUREDISPLAY_SHARED_MEM_SIZE; 2205 psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 2206 2207 if (!psp->securedisplay_context.context.initialized) { 2208 ret = psp_ta_init_shared_buf(psp, 2209 &psp->securedisplay_context.context.mem_context); 2210 if (ret) 2211 return ret; 2212 } 2213 2214 ret = psp_ta_load(psp, &psp->securedisplay_context.context); 2215 if (!ret) { 2216 psp->securedisplay_context.context.initialized = true; 2217 mutex_init(&psp->securedisplay_context.mutex); 2218 } else 2219 return ret; 2220 2221 mutex_lock(&psp->securedisplay_context.mutex); 2222 2223 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, 2224 TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2225 2226 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA); 2227 2228 mutex_unlock(&psp->securedisplay_context.mutex); 2229 
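/* if the QUERY_TA invocation failed, unload the securedisplay TA and release its shared buffer so secure display stays disabled */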
2230 if (ret) { 2231 psp_securedisplay_terminate(psp); 2232 /* free securedisplay shared memory */ 2233 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); 2234 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n"); 2235 return -EINVAL; 2236 } 2237 2238 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { 2239 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); 2240 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n", 2241 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret); 2242 /* don't try again */ 2243 psp->securedisplay_context.context.bin_desc.size_bytes = 0; 2244 } 2245 2246 return 0; 2247 } 2248 2249 static int psp_securedisplay_terminate(struct psp_context *psp) 2250 { 2251 int ret; 2252 2253 /* 2254 * TODO:bypass the terminate in sriov for now 2255 */ 2256 if (amdgpu_sriov_vf(psp->adev)) 2257 return 0; 2258 2259 if (!psp->securedisplay_context.context.initialized) 2260 return 0; 2261 2262 ret = psp_ta_unload(psp, &psp->securedisplay_context.context); 2263 2264 psp->securedisplay_context.context.initialized = false; 2265 2266 return ret; 2267 } 2268 2269 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 2270 { 2271 int ret; 2272 2273 if (!psp->securedisplay_context.context.initialized) 2274 return -EINVAL; 2275 2276 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA && 2277 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC && 2278 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2) 2279 return -EINVAL; 2280 2281 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context); 2282 2283 return ret; 2284 } 2285 /* SECUREDISPLAY end */ 2286 2287 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev) 2288 { 2289 struct psp_context *psp = &adev->psp; 2290 int ret = 0; 2291 2292 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL) 2293 ret = psp->funcs->wait_for_bootloader(psp); 2294 2295 return ret; 2296 } 2297 2298 bool amdgpu_psp_get_ras_capability(struct psp_context *psp) 2299 { 2300 if (psp->funcs && 2301 psp->funcs->get_ras_capability) { 2302 return psp->funcs->get_ras_capability(psp); 2303 } else { 2304 return false; 2305 } 2306 } 2307 2308 bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev) 2309 { 2310 struct psp_context *psp = &adev->psp; 2311 2312 if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) 2313 return false; 2314 2315 if (psp->funcs && psp->funcs->is_reload_needed) 2316 return psp->funcs->is_reload_needed(psp); 2317 2318 return false; 2319 } 2320 2321 static int psp_hw_start(struct psp_context *psp) 2322 { 2323 struct amdgpu_device *adev = psp->adev; 2324 int ret; 2325 2326 if (!amdgpu_sriov_vf(adev)) { 2327 if ((is_psp_fw_valid(psp->kdb)) && 2328 (psp->funcs->bootloader_load_kdb != NULL)) { 2329 ret = psp_bootloader_load_kdb(psp); 2330 if (ret) { 2331 dev_err(adev->dev, "PSP load kdb failed!\n"); 2332 return ret; 2333 } 2334 } 2335 2336 if ((is_psp_fw_valid(psp->spl)) && 2337 (psp->funcs->bootloader_load_spl != NULL)) { 2338 ret = psp_bootloader_load_spl(psp); 2339 if (ret) { 2340 dev_err(adev->dev, "PSP load spl failed!\n"); 2341 return ret; 2342 } 2343 } 2344 2345 if ((is_psp_fw_valid(psp->sys)) && 2346 (psp->funcs->bootloader_load_sysdrv != NULL)) { 2347 ret = psp_bootloader_load_sysdrv(psp); 2348 if (ret) { 2349 dev_err(adev->dev, "PSP load sys drv failed!\n"); 2350 return ret; 2351 } 2352 } 2353 2354 if ((is_psp_fw_valid(psp->soc_drv)) && 2355 
(psp->funcs->bootloader_load_soc_drv != NULL)) { 2356 ret = psp_bootloader_load_soc_drv(psp); 2357 if (ret) { 2358 dev_err(adev->dev, "PSP load soc drv failed!\n"); 2359 return ret; 2360 } 2361 } 2362 2363 if ((is_psp_fw_valid(psp->intf_drv)) && 2364 (psp->funcs->bootloader_load_intf_drv != NULL)) { 2365 ret = psp_bootloader_load_intf_drv(psp); 2366 if (ret) { 2367 dev_err(adev->dev, "PSP load intf drv failed!\n"); 2368 return ret; 2369 } 2370 } 2371 2372 if ((is_psp_fw_valid(psp->dbg_drv)) && 2373 (psp->funcs->bootloader_load_dbg_drv != NULL)) { 2374 ret = psp_bootloader_load_dbg_drv(psp); 2375 if (ret) { 2376 dev_err(adev->dev, "PSP load dbg drv failed!\n"); 2377 return ret; 2378 } 2379 } 2380 2381 if ((is_psp_fw_valid(psp->ras_drv)) && 2382 (psp->funcs->bootloader_load_ras_drv != NULL)) { 2383 ret = psp_bootloader_load_ras_drv(psp); 2384 if (ret) { 2385 dev_err(adev->dev, "PSP load ras_drv failed!\n"); 2386 return ret; 2387 } 2388 } 2389 2390 if ((is_psp_fw_valid(psp->ipkeymgr_drv)) && 2391 (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) { 2392 ret = psp_bootloader_load_ipkeymgr_drv(psp); 2393 if (ret) { 2394 dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n"); 2395 return ret; 2396 } 2397 } 2398 2399 if ((is_psp_fw_valid(psp->spdm_drv)) && 2400 (psp->funcs->bootloader_load_spdm_drv != NULL)) { 2401 ret = psp_bootloader_load_spdm_drv(psp); 2402 if (ret) { 2403 dev_err(adev->dev, "PSP load spdm_drv failed!\n"); 2404 return ret; 2405 } 2406 } 2407 2408 if ((is_psp_fw_valid(psp->sos)) && 2409 (psp->funcs->bootloader_load_sos != NULL)) { 2410 ret = psp_bootloader_load_sos(psp); 2411 if (ret) { 2412 dev_err(adev->dev, "PSP load sos failed!\n"); 2413 return ret; 2414 } 2415 } 2416 } 2417 2418 ret = psp_ring_create(psp, PSP_RING_TYPE__KM); 2419 if (ret) { 2420 dev_err(adev->dev, "PSP create ring failed!\n"); 2421 return ret; 2422 } 2423 2424 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) 2425 goto skip_pin_bo; 2426 2427 if (!psp->boot_time_tmr || psp->autoload_supported) { 2428 ret = psp_tmr_init(psp); 2429 if (ret) { 2430 dev_err(adev->dev, "PSP tmr init failed!\n"); 2431 return ret; 2432 } 2433 } 2434 2435 skip_pin_bo: 2436 /* 2437 * For ASICs with DF Cstate management centralized 2438 * to PMFW, TMR setup should be performed after PMFW 2439 * loaded and before other non-psp firmware loaded. 
2440 */ 2441 if (psp->pmfw_centralized_cstate_management) { 2442 ret = psp_load_smu_fw(psp); 2443 if (ret) 2444 return ret; 2445 } 2446 2447 if (!psp->boot_time_tmr || !psp->autoload_supported) { 2448 ret = psp_tmr_load(psp); 2449 if (ret) { 2450 dev_err(adev->dev, "PSP load tmr failed!\n"); 2451 return ret; 2452 } 2453 } 2454 2455 return 0; 2456 } 2457 2458 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, 2459 enum psp_gfx_fw_type *type) 2460 { 2461 switch (ucode->ucode_id) { 2462 case AMDGPU_UCODE_ID_CAP: 2463 *type = GFX_FW_TYPE_CAP; 2464 break; 2465 case AMDGPU_UCODE_ID_SDMA0: 2466 *type = GFX_FW_TYPE_SDMA0; 2467 break; 2468 case AMDGPU_UCODE_ID_SDMA1: 2469 *type = GFX_FW_TYPE_SDMA1; 2470 break; 2471 case AMDGPU_UCODE_ID_SDMA2: 2472 *type = GFX_FW_TYPE_SDMA2; 2473 break; 2474 case AMDGPU_UCODE_ID_SDMA3: 2475 *type = GFX_FW_TYPE_SDMA3; 2476 break; 2477 case AMDGPU_UCODE_ID_SDMA4: 2478 *type = GFX_FW_TYPE_SDMA4; 2479 break; 2480 case AMDGPU_UCODE_ID_SDMA5: 2481 *type = GFX_FW_TYPE_SDMA5; 2482 break; 2483 case AMDGPU_UCODE_ID_SDMA6: 2484 *type = GFX_FW_TYPE_SDMA6; 2485 break; 2486 case AMDGPU_UCODE_ID_SDMA7: 2487 *type = GFX_FW_TYPE_SDMA7; 2488 break; 2489 case AMDGPU_UCODE_ID_CP_MES: 2490 *type = GFX_FW_TYPE_CP_MES; 2491 break; 2492 case AMDGPU_UCODE_ID_CP_MES_DATA: 2493 *type = GFX_FW_TYPE_MES_STACK; 2494 break; 2495 case AMDGPU_UCODE_ID_CP_MES1: 2496 *type = GFX_FW_TYPE_CP_MES_KIQ; 2497 break; 2498 case AMDGPU_UCODE_ID_CP_MES1_DATA: 2499 *type = GFX_FW_TYPE_MES_KIQ_STACK; 2500 break; 2501 case AMDGPU_UCODE_ID_CP_CE: 2502 *type = GFX_FW_TYPE_CP_CE; 2503 break; 2504 case AMDGPU_UCODE_ID_CP_PFP: 2505 *type = GFX_FW_TYPE_CP_PFP; 2506 break; 2507 case AMDGPU_UCODE_ID_CP_ME: 2508 *type = GFX_FW_TYPE_CP_ME; 2509 break; 2510 case AMDGPU_UCODE_ID_CP_MEC1: 2511 *type = GFX_FW_TYPE_CP_MEC; 2512 break; 2513 case AMDGPU_UCODE_ID_CP_MEC1_JT: 2514 *type = GFX_FW_TYPE_CP_MEC_ME1; 2515 break; 2516 case AMDGPU_UCODE_ID_CP_MEC2: 2517 *type = GFX_FW_TYPE_CP_MEC; 2518 break; 2519 case AMDGPU_UCODE_ID_CP_MEC2_JT: 2520 *type = GFX_FW_TYPE_CP_MEC_ME2; 2521 break; 2522 case AMDGPU_UCODE_ID_RLC_P: 2523 *type = GFX_FW_TYPE_RLC_P; 2524 break; 2525 case AMDGPU_UCODE_ID_RLC_V: 2526 *type = GFX_FW_TYPE_RLC_V; 2527 break; 2528 case AMDGPU_UCODE_ID_RLC_G: 2529 *type = GFX_FW_TYPE_RLC_G; 2530 break; 2531 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: 2532 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL; 2533 break; 2534 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: 2535 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM; 2536 break; 2537 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: 2538 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; 2539 break; 2540 case AMDGPU_UCODE_ID_RLC_IRAM: 2541 *type = GFX_FW_TYPE_RLC_IRAM; 2542 break; 2543 case AMDGPU_UCODE_ID_RLC_DRAM: 2544 *type = GFX_FW_TYPE_RLC_DRAM_BOOT; 2545 break; 2546 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS: 2547 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS; 2548 break; 2549 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS: 2550 *type = GFX_FW_TYPE_SE0_TAP_DELAYS; 2551 break; 2552 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS: 2553 *type = GFX_FW_TYPE_SE1_TAP_DELAYS; 2554 break; 2555 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS: 2556 *type = GFX_FW_TYPE_SE2_TAP_DELAYS; 2557 break; 2558 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS: 2559 *type = GFX_FW_TYPE_SE3_TAP_DELAYS; 2560 break; 2561 case AMDGPU_UCODE_ID_SMC: 2562 *type = GFX_FW_TYPE_SMU; 2563 break; 2564 case AMDGPU_UCODE_ID_PPTABLE: 2565 *type = GFX_FW_TYPE_PPTABLE; 2566 break; 2567 case AMDGPU_UCODE_ID_UVD: 2568 *type = GFX_FW_TYPE_UVD; 2569 break; 2570 
case AMDGPU_UCODE_ID_UVD1: 2571 *type = GFX_FW_TYPE_UVD1; 2572 break; 2573 case AMDGPU_UCODE_ID_VCE: 2574 *type = GFX_FW_TYPE_VCE; 2575 break; 2576 case AMDGPU_UCODE_ID_VCN: 2577 *type = GFX_FW_TYPE_VCN; 2578 break; 2579 case AMDGPU_UCODE_ID_VCN1: 2580 *type = GFX_FW_TYPE_VCN1; 2581 break; 2582 case AMDGPU_UCODE_ID_DMCU_ERAM: 2583 *type = GFX_FW_TYPE_DMCU_ERAM; 2584 break; 2585 case AMDGPU_UCODE_ID_DMCU_INTV: 2586 *type = GFX_FW_TYPE_DMCU_ISR; 2587 break; 2588 case AMDGPU_UCODE_ID_VCN0_RAM: 2589 *type = GFX_FW_TYPE_VCN0_RAM; 2590 break; 2591 case AMDGPU_UCODE_ID_VCN1_RAM: 2592 *type = GFX_FW_TYPE_VCN1_RAM; 2593 break; 2594 case AMDGPU_UCODE_ID_DMCUB: 2595 *type = GFX_FW_TYPE_DMUB; 2596 break; 2597 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0: 2598 case AMDGPU_UCODE_ID_SDMA_RS64: 2599 *type = GFX_FW_TYPE_SDMA_UCODE_TH0; 2600 break; 2601 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1: 2602 *type = GFX_FW_TYPE_SDMA_UCODE_TH1; 2603 break; 2604 case AMDGPU_UCODE_ID_IMU_I: 2605 *type = GFX_FW_TYPE_IMU_I; 2606 break; 2607 case AMDGPU_UCODE_ID_IMU_D: 2608 *type = GFX_FW_TYPE_IMU_D; 2609 break; 2610 case AMDGPU_UCODE_ID_CP_RS64_PFP: 2611 *type = GFX_FW_TYPE_RS64_PFP; 2612 break; 2613 case AMDGPU_UCODE_ID_CP_RS64_ME: 2614 *type = GFX_FW_TYPE_RS64_ME; 2615 break; 2616 case AMDGPU_UCODE_ID_CP_RS64_MEC: 2617 *type = GFX_FW_TYPE_RS64_MEC; 2618 break; 2619 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK: 2620 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK; 2621 break; 2622 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK: 2623 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK; 2624 break; 2625 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK: 2626 *type = GFX_FW_TYPE_RS64_ME_P0_STACK; 2627 break; 2628 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK: 2629 *type = GFX_FW_TYPE_RS64_ME_P1_STACK; 2630 break; 2631 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK: 2632 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK; 2633 break; 2634 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK: 2635 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK; 2636 break; 2637 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK: 2638 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK; 2639 break; 2640 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK: 2641 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK; 2642 break; 2643 case AMDGPU_UCODE_ID_VPE_CTX: 2644 *type = GFX_FW_TYPE_VPEC_FW1; 2645 break; 2646 case AMDGPU_UCODE_ID_VPE_CTL: 2647 *type = GFX_FW_TYPE_VPEC_FW2; 2648 break; 2649 case AMDGPU_UCODE_ID_VPE: 2650 *type = GFX_FW_TYPE_VPE; 2651 break; 2652 case AMDGPU_UCODE_ID_UMSCH_MM_UCODE: 2653 *type = GFX_FW_TYPE_UMSCH_UCODE; 2654 break; 2655 case AMDGPU_UCODE_ID_UMSCH_MM_DATA: 2656 *type = GFX_FW_TYPE_UMSCH_DATA; 2657 break; 2658 case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER: 2659 *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER; 2660 break; 2661 case AMDGPU_UCODE_ID_P2S_TABLE: 2662 *type = GFX_FW_TYPE_P2S_TABLE; 2663 break; 2664 case AMDGPU_UCODE_ID_JPEG_RAM: 2665 *type = GFX_FW_TYPE_JPEG_RAM; 2666 break; 2667 case AMDGPU_UCODE_ID_ISP: 2668 *type = GFX_FW_TYPE_ISP; 2669 break; 2670 case AMDGPU_UCODE_ID_MAXIMUM: 2671 default: 2672 return -EINVAL; 2673 } 2674 2675 return 0; 2676 } 2677 2678 static void psp_print_fw_hdr(struct psp_context *psp, 2679 struct amdgpu_firmware_info *ucode) 2680 { 2681 struct amdgpu_device *adev = psp->adev; 2682 struct common_firmware_header *hdr; 2683 2684 switch (ucode->ucode_id) { 2685 case AMDGPU_UCODE_ID_SDMA0: 2686 case AMDGPU_UCODE_ID_SDMA1: 2687 case AMDGPU_UCODE_ID_SDMA2: 2688 case AMDGPU_UCODE_ID_SDMA3: 2689 case AMDGPU_UCODE_ID_SDMA4: 2690 case AMDGPU_UCODE_ID_SDMA5: 2691 case AMDGPU_UCODE_ID_SDMA6: 2692 case AMDGPU_UCODE_ID_SDMA7: 2693 hdr = (struct 
common_firmware_header *) 2694 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; 2695 amdgpu_ucode_print_sdma_hdr(hdr); 2696 break; 2697 case AMDGPU_UCODE_ID_CP_CE: 2698 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data; 2699 amdgpu_ucode_print_gfx_hdr(hdr); 2700 break; 2701 case AMDGPU_UCODE_ID_CP_PFP: 2702 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data; 2703 amdgpu_ucode_print_gfx_hdr(hdr); 2704 break; 2705 case AMDGPU_UCODE_ID_CP_ME: 2706 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data; 2707 amdgpu_ucode_print_gfx_hdr(hdr); 2708 break; 2709 case AMDGPU_UCODE_ID_CP_MEC1: 2710 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data; 2711 amdgpu_ucode_print_gfx_hdr(hdr); 2712 break; 2713 case AMDGPU_UCODE_ID_RLC_G: 2714 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data; 2715 amdgpu_ucode_print_rlc_hdr(hdr); 2716 break; 2717 case AMDGPU_UCODE_ID_SMC: 2718 hdr = (struct common_firmware_header *)adev->pm.fw->data; 2719 amdgpu_ucode_print_smc_hdr(hdr); 2720 break; 2721 default: 2722 break; 2723 } 2724 } 2725 2726 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp, 2727 struct amdgpu_firmware_info *ucode, 2728 struct psp_gfx_cmd_resp *cmd) 2729 { 2730 int ret; 2731 uint64_t fw_mem_mc_addr = ucode->mc_addr; 2732 2733 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 2734 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); 2735 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); 2736 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; 2737 2738 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); 2739 if (ret) 2740 dev_err(psp->adev->dev, "Unknown firmware type\n"); 2741 2742 return ret; 2743 } 2744 2745 int psp_execute_ip_fw_load(struct psp_context *psp, 2746 struct amdgpu_firmware_info *ucode) 2747 { 2748 int ret = 0; 2749 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 2750 2751 ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd); 2752 if (!ret) { 2753 ret = psp_cmd_submit_buf(psp, ucode, cmd, 2754 psp->fence_buf_mc_addr); 2755 } 2756 2757 release_psp_cmd_buf(psp); 2758 2759 return ret; 2760 } 2761 2762 static int psp_load_p2s_table(struct psp_context *psp) 2763 { 2764 int ret; 2765 struct amdgpu_device *adev = psp->adev; 2766 struct amdgpu_firmware_info *ucode = 2767 &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE]; 2768 2769 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || 2770 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO))) 2771 return 0; 2772 2773 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || 2774 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) { 2775 uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D : 2776 0x0036003C; 2777 if (psp->sos.fw_version < supp_vers) 2778 return 0; 2779 } 2780 2781 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2782 return 0; 2783 2784 ret = psp_execute_ip_fw_load(psp, ucode); 2785 2786 return ret; 2787 } 2788 2789 static int psp_load_smu_fw(struct psp_context *psp) 2790 { 2791 int ret; 2792 struct amdgpu_device *adev = psp->adev; 2793 struct amdgpu_firmware_info *ucode = 2794 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 2795 struct amdgpu_ras *ras = psp->ras_context.ras; 2796 2797 /* 2798 * Skip SMU FW reloading in case of using BACO for runpm only, 2799 * as SMU is always alive. 
2800 */ 2801 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || 2802 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO))) 2803 return 0; 2804 2805 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2806 return 0; 2807 2808 if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled && 2809 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) || 2810 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) { 2811 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); 2812 if (ret) 2813 dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n"); 2814 } 2815 2816 ret = psp_execute_ip_fw_load(psp, ucode); 2817 2818 if (ret) 2819 dev_err(adev->dev, "PSP load smu failed!\n"); 2820 2821 return ret; 2822 } 2823 2824 static bool fw_load_skip_check(struct psp_context *psp, 2825 struct amdgpu_firmware_info *ucode) 2826 { 2827 if (!ucode->fw || !ucode->ucode_size) 2828 return true; 2829 2830 if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE) 2831 return true; 2832 2833 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 2834 (psp_smu_reload_quirk(psp) || 2835 psp->autoload_supported || 2836 psp->pmfw_centralized_cstate_management)) 2837 return true; 2838 2839 if (amdgpu_sriov_vf(psp->adev) && 2840 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id)) 2841 return true; 2842 2843 if (psp->autoload_supported && 2844 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || 2845 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) 2846 /* skip mec JT when autoload is enabled */ 2847 return true; 2848 2849 return false; 2850 } 2851 2852 int psp_load_fw_list(struct psp_context *psp, 2853 struct amdgpu_firmware_info **ucode_list, int ucode_count) 2854 { 2855 int ret = 0, i; 2856 struct amdgpu_firmware_info *ucode; 2857 2858 for (i = 0; i < ucode_count; ++i) { 2859 ucode = ucode_list[i]; 2860 psp_print_fw_hdr(psp, ucode); 2861 ret = psp_execute_ip_fw_load(psp, ucode); 2862 if (ret) 2863 return ret; 2864 } 2865 return ret; 2866 } 2867 2868 static int psp_load_non_psp_fw(struct psp_context *psp) 2869 { 2870 int i, ret; 2871 struct amdgpu_firmware_info *ucode; 2872 struct amdgpu_device *adev = psp->adev; 2873 2874 if (psp->autoload_supported && 2875 !psp->pmfw_centralized_cstate_management) { 2876 ret = psp_load_smu_fw(psp); 2877 if (ret) 2878 return ret; 2879 } 2880 2881 /* Load P2S table first if it's available */ 2882 psp_load_p2s_table(psp); 2883 2884 for (i = 0; i < adev->firmware.max_ucodes; i++) { 2885 ucode = &adev->firmware.ucode[i]; 2886 2887 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 2888 !fw_load_skip_check(psp, ucode)) { 2889 ret = psp_load_smu_fw(psp); 2890 if (ret) 2891 return ret; 2892 continue; 2893 } 2894 2895 if (fw_load_skip_check(psp, ucode)) 2896 continue; 2897 2898 if (psp->autoload_supported && 2899 (amdgpu_ip_version(adev, MP0_HWIP, 0) == 2900 IP_VERSION(11, 0, 7) || 2901 amdgpu_ip_version(adev, MP0_HWIP, 0) == 2902 IP_VERSION(11, 0, 11) || 2903 amdgpu_ip_version(adev, MP0_HWIP, 0) == 2904 IP_VERSION(11, 0, 12)) && 2905 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 || 2906 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 || 2907 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3)) 2908 /* PSP only receive one SDMA fw for sienna_cichlid, 2909 * as all four sdma fw are same 2910 */ 2911 continue; 2912 2913 psp_print_fw_hdr(psp, ucode); 2914 2915 ret = psp_execute_ip_fw_load(psp, ucode); 2916 if (ret) 2917 return ret; 2918 2919 /* Start rlc autoload after psp received all the gfx firmware */ 2920 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ? 
2921 adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) { 2922 ret = psp_rlc_autoload_start(psp); 2923 if (ret) { 2924 dev_err(adev->dev, "Failed to start rlc autoload\n"); 2925 return ret; 2926 } 2927 } 2928 } 2929 2930 return 0; 2931 } 2932 2933 static int psp_load_fw(struct amdgpu_device *adev) 2934 { 2935 int ret; 2936 struct psp_context *psp = &adev->psp; 2937 2938 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { 2939 /* should not destroy ring, only stop */ 2940 psp_ring_stop(psp, PSP_RING_TYPE__KM); 2941 } else { 2942 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE); 2943 2944 ret = psp_ring_init(psp, PSP_RING_TYPE__KM); 2945 if (ret) { 2946 dev_err(adev->dev, "PSP ring init failed!\n"); 2947 goto failed; 2948 } 2949 } 2950 2951 ret = psp_hw_start(psp); 2952 if (ret) 2953 goto failed; 2954 2955 ret = psp_load_non_psp_fw(psp); 2956 if (ret) 2957 goto failed1; 2958 2959 ret = psp_asd_initialize(psp); 2960 if (ret) { 2961 dev_err(adev->dev, "PSP load asd failed!\n"); 2962 goto failed1; 2963 } 2964 2965 ret = psp_rl_load(adev); 2966 if (ret) { 2967 dev_err(adev->dev, "PSP load RL failed!\n"); 2968 goto failed1; 2969 } 2970 2971 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { 2972 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2973 ret = psp_xgmi_initialize(psp, false, true); 2974 /* Warning the XGMI seesion initialize failure 2975 * Instead of stop driver initialization 2976 */ 2977 if (ret) 2978 dev_err(psp->adev->dev, 2979 "XGMI: Failed to initialize XGMI session\n"); 2980 } 2981 } 2982 2983 if (psp->ta_fw) { 2984 ret = psp_ras_initialize(psp); 2985 if (ret) 2986 dev_err(psp->adev->dev, 2987 "RAS: Failed to initialize RAS\n"); 2988 2989 ret = psp_hdcp_initialize(psp); 2990 if (ret) 2991 dev_err(psp->adev->dev, 2992 "HDCP: Failed to initialize HDCP\n"); 2993 2994 ret = psp_dtm_initialize(psp); 2995 if (ret) 2996 dev_err(psp->adev->dev, 2997 "DTM: Failed to initialize DTM\n"); 2998 2999 ret = psp_rap_initialize(psp); 3000 if (ret) 3001 dev_err(psp->adev->dev, 3002 "RAP: Failed to initialize RAP\n"); 3003 3004 ret = psp_securedisplay_initialize(psp); 3005 if (ret) 3006 dev_err(psp->adev->dev, 3007 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 3008 } 3009 3010 return 0; 3011 3012 failed1: 3013 psp_free_shared_bufs(psp); 3014 failed: 3015 /* 3016 * all cleanup jobs (xgmi terminate, ras terminate, 3017 * ring destroy, cmd/fence/fw buffers destory, 3018 * psp->cmd destory) are delayed to psp_hw_fini 3019 */ 3020 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 3021 return ret; 3022 } 3023 3024 static int psp_hw_init(struct amdgpu_ip_block *ip_block) 3025 { 3026 int ret; 3027 struct amdgpu_device *adev = ip_block->adev; 3028 3029 mutex_lock(&adev->firmware.mutex); 3030 3031 ret = amdgpu_ucode_init_bo(adev); 3032 if (ret) 3033 goto failed; 3034 3035 ret = psp_load_fw(adev); 3036 if (ret) { 3037 dev_err(adev->dev, "PSP firmware loading failed\n"); 3038 goto failed; 3039 } 3040 3041 mutex_unlock(&adev->firmware.mutex); 3042 return 0; 3043 3044 failed: 3045 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT; 3046 mutex_unlock(&adev->firmware.mutex); 3047 return -EINVAL; 3048 } 3049 3050 static int psp_hw_fini(struct amdgpu_ip_block *ip_block) 3051 { 3052 struct amdgpu_device *adev = ip_block->adev; 3053 struct psp_context *psp = &adev->psp; 3054 3055 if (psp->ta_fw) { 3056 psp_ras_terminate(psp); 3057 psp_securedisplay_terminate(psp); 3058 psp_rap_terminate(psp); 3059 psp_dtm_terminate(psp); 3060 psp_hdcp_terminate(psp); 3061 3062 if (adev->gmc.xgmi.num_physical_nodes > 1) 3063 
psp_xgmi_terminate(psp); 3064 } 3065 3066 psp_asd_terminate(psp); 3067 psp_tmr_terminate(psp); 3068 3069 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 3070 3071 return 0; 3072 } 3073 3074 static int psp_suspend(struct amdgpu_ip_block *ip_block) 3075 { 3076 int ret = 0; 3077 struct amdgpu_device *adev = ip_block->adev; 3078 struct psp_context *psp = &adev->psp; 3079 3080 if (adev->gmc.xgmi.num_physical_nodes > 1 && 3081 psp->xgmi_context.context.initialized) { 3082 ret = psp_xgmi_terminate(psp); 3083 if (ret) { 3084 dev_err(adev->dev, "Failed to terminate xgmi ta\n"); 3085 goto out; 3086 } 3087 } 3088 3089 if (psp->ta_fw) { 3090 ret = psp_ras_terminate(psp); 3091 if (ret) { 3092 dev_err(adev->dev, "Failed to terminate ras ta\n"); 3093 goto out; 3094 } 3095 ret = psp_hdcp_terminate(psp); 3096 if (ret) { 3097 dev_err(adev->dev, "Failed to terminate hdcp ta\n"); 3098 goto out; 3099 } 3100 ret = psp_dtm_terminate(psp); 3101 if (ret) { 3102 dev_err(adev->dev, "Failed to terminate dtm ta\n"); 3103 goto out; 3104 } 3105 ret = psp_rap_terminate(psp); 3106 if (ret) { 3107 dev_err(adev->dev, "Failed to terminate rap ta\n"); 3108 goto out; 3109 } 3110 ret = psp_securedisplay_terminate(psp); 3111 if (ret) { 3112 dev_err(adev->dev, "Failed to terminate securedisplay ta\n"); 3113 goto out; 3114 } 3115 } 3116 3117 ret = psp_asd_terminate(psp); 3118 if (ret) { 3119 dev_err(adev->dev, "Failed to terminate asd\n"); 3120 goto out; 3121 } 3122 3123 ret = psp_tmr_terminate(psp); 3124 if (ret) { 3125 dev_err(adev->dev, "Failed to terminate tmr\n"); 3126 goto out; 3127 } 3128 3129 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); 3130 if (ret) 3131 dev_err(adev->dev, "PSP ring stop failed\n"); 3132 3133 out: 3134 return ret; 3135 } 3136 3137 static int psp_resume(struct amdgpu_ip_block *ip_block) 3138 { 3139 int ret; 3140 struct amdgpu_device *adev = ip_block->adev; 3141 struct psp_context *psp = &adev->psp; 3142 3143 dev_info(adev->dev, "PSP is resuming...\n"); 3144 3145 if (psp->mem_train_ctx.enable_mem_training) { 3146 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); 3147 if (ret) { 3148 dev_err(adev->dev, "Failed to process memory training!\n"); 3149 return ret; 3150 } 3151 } 3152 3153 mutex_lock(&adev->firmware.mutex); 3154 3155 ret = amdgpu_ucode_init_bo(adev); 3156 if (ret) 3157 goto failed; 3158 3159 ret = psp_hw_start(psp); 3160 if (ret) 3161 goto failed; 3162 3163 ret = psp_load_non_psp_fw(psp); 3164 if (ret) 3165 goto failed; 3166 3167 ret = psp_asd_initialize(psp); 3168 if (ret) { 3169 dev_err(adev->dev, "PSP load asd failed!\n"); 3170 goto failed; 3171 } 3172 3173 ret = psp_rl_load(adev); 3174 if (ret) { 3175 dev_err(adev->dev, "PSP load RL failed!\n"); 3176 goto failed; 3177 } 3178 3179 if (adev->gmc.xgmi.num_physical_nodes > 1) { 3180 ret = psp_xgmi_initialize(psp, false, true); 3181 /* Warning the XGMI seesion initialize failure 3182 * Instead of stop driver initialization 3183 */ 3184 if (ret) 3185 dev_err(psp->adev->dev, 3186 "XGMI: Failed to initialize XGMI session\n"); 3187 } 3188 3189 if (psp->ta_fw) { 3190 ret = psp_ras_initialize(psp); 3191 if (ret) 3192 dev_err(psp->adev->dev, 3193 "RAS: Failed to initialize RAS\n"); 3194 3195 ret = psp_hdcp_initialize(psp); 3196 if (ret) 3197 dev_err(psp->adev->dev, 3198 "HDCP: Failed to initialize HDCP\n"); 3199 3200 ret = psp_dtm_initialize(psp); 3201 if (ret) 3202 dev_err(psp->adev->dev, 3203 "DTM: Failed to initialize DTM\n"); 3204 3205 ret = psp_rap_initialize(psp); 3206 if (ret) 3207 dev_err(psp->adev->dev, 3208 "RAP: Failed to initialize RAP\n"); 3209 
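/* the securedisplay TA is likewise optional on resume; log the failure and keep going */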
3210 ret = psp_securedisplay_initialize(psp); 3211 if (ret) 3212 dev_err(psp->adev->dev, 3213 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 3214 } 3215 3216 mutex_unlock(&adev->firmware.mutex); 3217 3218 return 0; 3219 3220 failed: 3221 dev_err(adev->dev, "PSP resume failed\n"); 3222 mutex_unlock(&adev->firmware.mutex); 3223 return ret; 3224 } 3225 3226 int psp_gpu_reset(struct amdgpu_device *adev) 3227 { 3228 int ret; 3229 3230 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 3231 return 0; 3232 3233 mutex_lock(&adev->psp.mutex); 3234 ret = psp_mode1_reset(&adev->psp); 3235 mutex_unlock(&adev->psp.mutex); 3236 3237 return ret; 3238 } 3239 3240 int psp_rlc_autoload_start(struct psp_context *psp) 3241 { 3242 int ret; 3243 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 3244 3245 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC; 3246 3247 ret = psp_cmd_submit_buf(psp, NULL, cmd, 3248 psp->fence_buf_mc_addr); 3249 3250 release_psp_cmd_buf(psp); 3251 3252 return ret; 3253 } 3254 3255 int psp_ring_cmd_submit(struct psp_context *psp, 3256 uint64_t cmd_buf_mc_addr, 3257 uint64_t fence_mc_addr, 3258 int index) 3259 { 3260 unsigned int psp_write_ptr_reg = 0; 3261 struct psp_gfx_rb_frame *write_frame; 3262 struct psp_ring *ring = &psp->km_ring; 3263 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; 3264 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + 3265 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; 3266 struct amdgpu_device *adev = psp->adev; 3267 uint32_t ring_size_dw = ring->ring_size / 4; 3268 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; 3269 3270 /* KM (GPCOM) prepare write pointer */ 3271 psp_write_ptr_reg = psp_ring_get_wptr(psp); 3272 3273 /* Update KM RB frame pointer to new frame */ 3274 /* write_frame ptr increments by size of rb_frame in bytes */ 3275 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ 3276 if ((psp_write_ptr_reg % ring_size_dw) == 0) 3277 write_frame = ring_buffer_start; 3278 else 3279 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); 3280 /* Check invalid write_frame ptr address */ 3281 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { 3282 dev_err(adev->dev, 3283 "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", 3284 ring_buffer_start, ring_buffer_end, write_frame); 3285 dev_err(adev->dev, 3286 "write_frame is pointing to address out of bounds\n"); 3287 return -EINVAL; 3288 } 3289 3290 /* Initialize KM RB frame */ 3291 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); 3292 3293 /* Update KM RB frame */ 3294 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); 3295 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); 3296 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); 3297 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); 3298 write_frame->fence_value = index; 3299 amdgpu_device_flush_hdp(adev, NULL); 3300 3301 /* Update the write Pointer in DWORDs */ 3302 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; 3303 psp_ring_set_wptr(psp, psp_write_ptr_reg); 3304 return 0; 3305 } 3306 3307 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name) 3308 { 3309 struct amdgpu_device *adev = psp->adev; 3310 const struct psp_firmware_header_v1_0 *asd_hdr; 3311 int err = 0; 3312 3313 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED, 3314 "amdgpu/%s_asd.bin", chip_name); 3315 if (err) 3316 goto out; 3317 3318 asd_hdr = 
(const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; 3319 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version); 3320 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version); 3321 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes); 3322 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr + 3323 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); 3324 return 0; 3325 out: 3326 amdgpu_ucode_release(&adev->psp.asd_fw); 3327 return err; 3328 } 3329 3330 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name) 3331 { 3332 struct amdgpu_device *adev = psp->adev; 3333 const struct psp_firmware_header_v1_0 *toc_hdr; 3334 int err = 0; 3335 3336 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED, 3337 "amdgpu/%s_toc.bin", chip_name); 3338 if (err) 3339 goto out; 3340 3341 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; 3342 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); 3343 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); 3344 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); 3345 adev->psp.toc.start_addr = (uint8_t *)toc_hdr + 3346 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); 3347 return 0; 3348 out: 3349 amdgpu_ucode_release(&adev->psp.toc_fw); 3350 return err; 3351 } 3352 3353 static int parse_sos_bin_descriptor(struct psp_context *psp, 3354 const struct psp_fw_bin_desc *desc, 3355 const struct psp_firmware_header_v2_0 *sos_hdr) 3356 { 3357 uint8_t *ucode_start_addr = NULL; 3358 3359 if (!psp || !desc || !sos_hdr) 3360 return -EINVAL; 3361 3362 ucode_start_addr = (uint8_t *)sos_hdr + 3363 le32_to_cpu(desc->offset_bytes) + 3364 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3365 3366 switch (desc->fw_type) { 3367 case PSP_FW_TYPE_PSP_SOS: 3368 psp->sos.fw_version = le32_to_cpu(desc->fw_version); 3369 psp->sos.feature_version = le32_to_cpu(desc->fw_version); 3370 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes); 3371 psp->sos.start_addr = ucode_start_addr; 3372 break; 3373 case PSP_FW_TYPE_PSP_SYS_DRV: 3374 psp->sys.fw_version = le32_to_cpu(desc->fw_version); 3375 psp->sys.feature_version = le32_to_cpu(desc->fw_version); 3376 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes); 3377 psp->sys.start_addr = ucode_start_addr; 3378 break; 3379 case PSP_FW_TYPE_PSP_KDB: 3380 psp->kdb.fw_version = le32_to_cpu(desc->fw_version); 3381 psp->kdb.feature_version = le32_to_cpu(desc->fw_version); 3382 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes); 3383 psp->kdb.start_addr = ucode_start_addr; 3384 break; 3385 case PSP_FW_TYPE_PSP_TOC: 3386 psp->toc.fw_version = le32_to_cpu(desc->fw_version); 3387 psp->toc.feature_version = le32_to_cpu(desc->fw_version); 3388 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes); 3389 psp->toc.start_addr = ucode_start_addr; 3390 break; 3391 case PSP_FW_TYPE_PSP_SPL: 3392 psp->spl.fw_version = le32_to_cpu(desc->fw_version); 3393 psp->spl.feature_version = le32_to_cpu(desc->fw_version); 3394 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes); 3395 psp->spl.start_addr = ucode_start_addr; 3396 break; 3397 case PSP_FW_TYPE_PSP_RL: 3398 psp->rl.fw_version = le32_to_cpu(desc->fw_version); 3399 psp->rl.feature_version = le32_to_cpu(desc->fw_version); 3400 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes); 3401 psp->rl.start_addr = ucode_start_addr; 3402 break; 3403 case PSP_FW_TYPE_PSP_SOC_DRV: 3404 
psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version); 3405 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version); 3406 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3407 psp->soc_drv.start_addr = ucode_start_addr; 3408 break; 3409 case PSP_FW_TYPE_PSP_INTF_DRV: 3410 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version); 3411 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version); 3412 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3413 psp->intf_drv.start_addr = ucode_start_addr; 3414 break; 3415 case PSP_FW_TYPE_PSP_DBG_DRV: 3416 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version); 3417 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version); 3418 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3419 psp->dbg_drv.start_addr = ucode_start_addr; 3420 break; 3421 case PSP_FW_TYPE_PSP_RAS_DRV: 3422 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version); 3423 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version); 3424 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3425 psp->ras_drv.start_addr = ucode_start_addr; 3426 break; 3427 case PSP_FW_TYPE_PSP_IPKEYMGR_DRV: 3428 psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version); 3429 psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version); 3430 psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3431 psp->ipkeymgr_drv.start_addr = ucode_start_addr; 3432 break; 3433 case PSP_FW_TYPE_PSP_SPDM_DRV: 3434 psp->spdm_drv.fw_version = le32_to_cpu(desc->fw_version); 3435 psp->spdm_drv.feature_version = le32_to_cpu(desc->fw_version); 3436 psp->spdm_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3437 psp->spdm_drv.start_addr = ucode_start_addr; 3438 break; 3439 default: 3440 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type); 3441 break; 3442 } 3443 3444 return 0; 3445 } 3446 3447 static int psp_init_sos_base_fw(struct amdgpu_device *adev) 3448 { 3449 const struct psp_firmware_header_v1_0 *sos_hdr; 3450 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3451 uint8_t *ucode_array_start_addr; 3452 3453 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3454 ucode_array_start_addr = (uint8_t *)sos_hdr + 3455 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3456 3457 if (adev->gmc.xgmi.connected_to_cpu || 3458 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) { 3459 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version); 3460 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version); 3461 3462 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes); 3463 adev->psp.sys.start_addr = ucode_array_start_addr; 3464 3465 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes); 3466 adev->psp.sos.start_addr = ucode_array_start_addr + 3467 le32_to_cpu(sos_hdr->sos.offset_bytes); 3468 } else { 3469 /* Load alternate PSP SOS FW */ 3470 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3471 3472 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3473 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3474 3475 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes); 3476 adev->psp.sys.start_addr = ucode_array_start_addr + 3477 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes); 3478 3479 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes); 3480 adev->psp.sos.start_addr = ucode_array_start_addr + 3481 
le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes); 3482 } 3483 3484 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) { 3485 dev_warn(adev->dev, "PSP SOS FW not available"); 3486 return -EINVAL; 3487 } 3488 3489 return 0; 3490 } 3491 3492 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) 3493 { 3494 struct amdgpu_device *adev = psp->adev; 3495 const struct psp_firmware_header_v1_0 *sos_hdr; 3496 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1; 3497 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; 3498 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3499 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0; 3500 const struct psp_firmware_header_v2_1 *sos_hdr_v2_1; 3501 int fw_index, fw_bin_count, start_index = 0; 3502 const struct psp_fw_bin_desc *fw_bin; 3503 uint8_t *ucode_array_start_addr; 3504 int err = 0; 3505 3506 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED, 3507 "amdgpu/%s_sos.bin", chip_name); 3508 if (err) 3509 goto out; 3510 3511 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3512 ucode_array_start_addr = (uint8_t *)sos_hdr + 3513 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3514 amdgpu_ucode_print_psp_hdr(&sos_hdr->header); 3515 3516 switch (sos_hdr->header.header_version_major) { 3517 case 1: 3518 err = psp_init_sos_base_fw(adev); 3519 if (err) 3520 goto out; 3521 3522 if (sos_hdr->header.header_version_minor == 1) { 3523 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; 3524 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes); 3525 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3526 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes); 3527 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes); 3528 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3529 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes); 3530 } 3531 if (sos_hdr->header.header_version_minor == 2) { 3532 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; 3533 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes); 3534 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3535 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes); 3536 } 3537 if (sos_hdr->header.header_version_minor == 3) { 3538 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3539 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes); 3540 adev->psp.toc.start_addr = ucode_array_start_addr + 3541 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes); 3542 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes); 3543 adev->psp.kdb.start_addr = ucode_array_start_addr + 3544 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes); 3545 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes); 3546 adev->psp.spl.start_addr = ucode_array_start_addr + 3547 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes); 3548 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes); 3549 adev->psp.rl.start_addr = ucode_array_start_addr + 3550 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes); 3551 } 3552 break; 3553 case 2: 3554 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data; 3555 3556 fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); 3557 3558 if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) { 3559 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n"); 3560 err = -EINVAL; 3561 goto out; 3562 } 3563 3564 if 
(sos_hdr_v2_0->header.header_version_minor == 1) { 3565 sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data; 3566 3567 fw_bin = sos_hdr_v2_1->psp_fw_bin; 3568 3569 if (psp_is_aux_sos_load_required(psp)) 3570 start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index); 3571 else 3572 fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index); 3573 3574 } else { 3575 fw_bin = sos_hdr_v2_0->psp_fw_bin; 3576 } 3577 3578 for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) { 3579 err = parse_sos_bin_descriptor(psp, fw_bin + fw_index, 3580 sos_hdr_v2_0); 3581 if (err) 3582 goto out; 3583 } 3584 break; 3585 default: 3586 dev_err(adev->dev, 3587 "unsupported psp sos firmware\n"); 3588 err = -EINVAL; 3589 goto out; 3590 } 3591 3592 return 0; 3593 out: 3594 amdgpu_ucode_release(&adev->psp.sos_fw); 3595 3596 return err; 3597 } 3598 3599 static bool is_ta_fw_applicable(struct psp_context *psp, 3600 const struct psp_fw_bin_desc *desc) 3601 { 3602 struct amdgpu_device *adev = psp->adev; 3603 uint32_t fw_version; 3604 3605 switch (desc->fw_type) { 3606 case TA_FW_TYPE_PSP_XGMI: 3607 case TA_FW_TYPE_PSP_XGMI_AUX: 3608 /* for now, AUX TA only exists on 13.0.6 ta bin, 3609 * from v20.00.0x.14 3610 */ 3611 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == 3612 IP_VERSION(13, 0, 6)) { 3613 fw_version = le32_to_cpu(desc->fw_version); 3614 3615 if (adev->flags & AMD_IS_APU && 3616 (fw_version & 0xff) >= 0x14) 3617 return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX; 3618 else 3619 return desc->fw_type == TA_FW_TYPE_PSP_XGMI; 3620 } 3621 break; 3622 default: 3623 break; 3624 } 3625 3626 return true; 3627 } 3628 3629 static int parse_ta_bin_descriptor(struct psp_context *psp, 3630 const struct psp_fw_bin_desc *desc, 3631 const struct ta_firmware_header_v2_0 *ta_hdr) 3632 { 3633 uint8_t *ucode_start_addr = NULL; 3634 3635 if (!psp || !desc || !ta_hdr) 3636 return -EINVAL; 3637 3638 if (!is_ta_fw_applicable(psp, desc)) 3639 return 0; 3640 3641 ucode_start_addr = (uint8_t *)ta_hdr + 3642 le32_to_cpu(desc->offset_bytes) + 3643 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3644 3645 switch (desc->fw_type) { 3646 case TA_FW_TYPE_PSP_ASD: 3647 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3648 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version); 3649 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3650 psp->asd_context.bin_desc.start_addr = ucode_start_addr; 3651 break; 3652 case TA_FW_TYPE_PSP_XGMI: 3653 case TA_FW_TYPE_PSP_XGMI_AUX: 3654 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3655 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3656 psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr; 3657 break; 3658 case TA_FW_TYPE_PSP_RAS: 3659 psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3660 psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3661 psp->ras_context.context.bin_desc.start_addr = ucode_start_addr; 3662 break; 3663 case TA_FW_TYPE_PSP_HDCP: 3664 psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3665 psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3666 psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr; 3667 break; 3668 case TA_FW_TYPE_PSP_DTM: 3669 psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3670 psp->dtm_context.context.bin_desc.size_bytes = 
le32_to_cpu(desc->size_bytes); 3671 psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr; 3672 break; 3673 case TA_FW_TYPE_PSP_RAP: 3674 psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3675 psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3676 psp->rap_context.context.bin_desc.start_addr = ucode_start_addr; 3677 break; 3678 case TA_FW_TYPE_PSP_SECUREDISPLAY: 3679 psp->securedisplay_context.context.bin_desc.fw_version = 3680 le32_to_cpu(desc->fw_version); 3681 psp->securedisplay_context.context.bin_desc.size_bytes = 3682 le32_to_cpu(desc->size_bytes); 3683 psp->securedisplay_context.context.bin_desc.start_addr = 3684 ucode_start_addr; 3685 break; 3686 default: 3687 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type); 3688 break; 3689 } 3690 3691 return 0; 3692 } 3693 3694 static int parse_ta_v1_microcode(struct psp_context *psp) 3695 { 3696 const struct ta_firmware_header_v1_0 *ta_hdr; 3697 struct amdgpu_device *adev = psp->adev; 3698 3699 ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data; 3700 3701 if (le16_to_cpu(ta_hdr->header.header_version_major) != 1) 3702 return -EINVAL; 3703 3704 adev->psp.xgmi_context.context.bin_desc.fw_version = 3705 le32_to_cpu(ta_hdr->xgmi.fw_version); 3706 adev->psp.xgmi_context.context.bin_desc.size_bytes = 3707 le32_to_cpu(ta_hdr->xgmi.size_bytes); 3708 adev->psp.xgmi_context.context.bin_desc.start_addr = 3709 (uint8_t *)ta_hdr + 3710 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3711 3712 adev->psp.ras_context.context.bin_desc.fw_version = 3713 le32_to_cpu(ta_hdr->ras.fw_version); 3714 adev->psp.ras_context.context.bin_desc.size_bytes = 3715 le32_to_cpu(ta_hdr->ras.size_bytes); 3716 adev->psp.ras_context.context.bin_desc.start_addr = 3717 (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr + 3718 le32_to_cpu(ta_hdr->ras.offset_bytes); 3719 3720 adev->psp.hdcp_context.context.bin_desc.fw_version = 3721 le32_to_cpu(ta_hdr->hdcp.fw_version); 3722 adev->psp.hdcp_context.context.bin_desc.size_bytes = 3723 le32_to_cpu(ta_hdr->hdcp.size_bytes); 3724 adev->psp.hdcp_context.context.bin_desc.start_addr = 3725 (uint8_t *)ta_hdr + 3726 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3727 3728 adev->psp.dtm_context.context.bin_desc.fw_version = 3729 le32_to_cpu(ta_hdr->dtm.fw_version); 3730 adev->psp.dtm_context.context.bin_desc.size_bytes = 3731 le32_to_cpu(ta_hdr->dtm.size_bytes); 3732 adev->psp.dtm_context.context.bin_desc.start_addr = 3733 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3734 le32_to_cpu(ta_hdr->dtm.offset_bytes); 3735 3736 adev->psp.securedisplay_context.context.bin_desc.fw_version = 3737 le32_to_cpu(ta_hdr->securedisplay.fw_version); 3738 adev->psp.securedisplay_context.context.bin_desc.size_bytes = 3739 le32_to_cpu(ta_hdr->securedisplay.size_bytes); 3740 adev->psp.securedisplay_context.context.bin_desc.start_addr = 3741 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3742 le32_to_cpu(ta_hdr->securedisplay.offset_bytes); 3743 3744 adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); 3745 3746 return 0; 3747 } 3748 3749 static int parse_ta_v2_microcode(struct psp_context *psp) 3750 { 3751 const struct ta_firmware_header_v2_0 *ta_hdr; 3752 struct amdgpu_device *adev = psp->adev; 3753 int err = 0; 3754 int ta_index = 0; 3755 3756 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data; 3757 3758 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) 3759 
        return -EINVAL;

    if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
        dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
        return -EINVAL;
    }

    for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
        err = parse_ta_bin_descriptor(psp,
                                      &ta_hdr->ta_fw_bin[ta_index],
                                      ta_hdr);
        if (err)
            return err;
    }

    return 0;
}

int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
{
    const struct common_firmware_header *hdr;
    struct amdgpu_device *adev = psp->adev;
    int err;

    err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
                               "amdgpu/%s_ta.bin", chip_name);
    if (err)
        return err;

    hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
    switch (le16_to_cpu(hdr->header_version_major)) {
    case 1:
        err = parse_ta_v1_microcode(psp);
        break;
    case 2:
        err = parse_ta_v2_microcode(psp);
        break;
    default:
        dev_err(adev->dev, "unsupported TA header version\n");
        err = -EINVAL;
    }

    if (err)
        amdgpu_ucode_release(&adev->psp.ta_fw);

    return err;
}

int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
{
    struct amdgpu_device *adev = psp->adev;
    const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
    struct amdgpu_firmware_info *info = NULL;
    int err = 0;

    if (!amdgpu_sriov_vf(adev)) {
        dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
        return -EINVAL;
    }

    err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL,
                               "amdgpu/%s_cap.bin", chip_name);
    if (err) {
        if (err == -ENODEV) {
            dev_warn(adev->dev, "cap microcode does not exist, skip\n");
            err = 0;
        } else {
            dev_err(adev->dev, "failed to initialize cap microcode\n");
        }
        goto out;
    }

    info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
    info->ucode_id = AMDGPU_UCODE_ID_CAP;
    info->fw = adev->psp.cap_fw;
    cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
        adev->psp.cap_fw->data;
    adev->firmware.fw_size += ALIGN(
        le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
    adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
    adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
    adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);

    return 0;

out:
    amdgpu_ucode_release(&adev->psp.cap_fw);
    return err;
}

int psp_config_sq_perfmon(struct psp_context *psp,
                          uint32_t xcp_id, bool core_override_enable,
                          bool reg_override_enable, bool perfmon_override_enable)
{
    int ret;

    if (amdgpu_sriov_vf(psp->adev))
        return 0;

    if (xcp_id > MAX_XCP) {
        dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id);
        return -EINVAL;
    }

    if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) {
        dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n",
                amdgpu_ip_version(psp->adev, MP0_HWIP, 0));
        return -EINVAL;
    }

    struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);

    cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON;
    cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id);
    cmd->cmd.config_sq_perfmon.core_override =
        core_override_enable;
    cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable;
    cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable;

    ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
    if (ret)
        dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n",
                 xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable);

    release_psp_cmd_buf(psp);
    return ret;
}

static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
                                     enum amd_clockgating_state state)
{
    return 0;
}

static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block,
                                     enum amd_powergating_state state)
{
    return 0;
}

static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
    struct drm_device *ddev = dev_get_drvdata(dev);
    struct amdgpu_device *adev = drm_to_adev(ddev);
    struct amdgpu_ip_block *ip_block;
    uint32_t fw_ver;
    int ret;

    ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
    if (!ip_block || !ip_block->status.late_initialized) {
        dev_info(adev->dev, "PSP block is not ready yet.\n");
        return -EBUSY;
    }

    mutex_lock(&adev->psp.mutex);
    ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
    mutex_unlock(&adev->psp.mutex);

    if (ret) {
        dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
        return ret;
    }

    return sysfs_emit(buf, "%x\n", fw_ver);
}

static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf,
                                          size_t count)
{
    struct drm_device *ddev = dev_get_drvdata(dev);
    struct amdgpu_device *adev = drm_to_adev(ddev);
    int ret, idx;
    const struct firmware *usbc_pd_fw;
    struct amdgpu_bo *fw_buf_bo = NULL;
    uint64_t fw_pri_mc_addr;
    void *fw_pri_cpu_addr;
    struct amdgpu_ip_block *ip_block;

    ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
    if (!ip_block || !ip_block->status.late_initialized) {
        dev_err(adev->dev, "PSP block is not ready yet.\n");
        return -EBUSY;
    }

    if (!drm_dev_enter(ddev, &idx))
        return -ENODEV;

    ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
                               "amdgpu/%s", buf);
    if (ret)
        goto fail;

    /* LFB address which is aligned to 1MB boundary per PSP request */
    ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
                                  AMDGPU_GEM_DOMAIN_VRAM |
                                  AMDGPU_GEM_DOMAIN_GTT,
                                  &fw_buf_bo, &fw_pri_mc_addr,
                                  &fw_pri_cpu_addr);
    if (ret)
        goto rel_buf;

    memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);

    mutex_lock(&adev->psp.mutex);
    ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
    mutex_unlock(&adev->psp.mutex);

    amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);

rel_buf:
    amdgpu_ucode_release(&usbc_pd_fw);
fail:
    if (ret) {
        dev_err(adev->dev, "Failed to load USBC PD FW, err = %d\n", ret);
        count = ret;
    }

    drm_dev_exit(idx);
    return count;
}

void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
{
    int idx;

    if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
        return;

    memset(psp->fw_pri_buf, 0,
           PSP_1_MEG);
    memcpy(psp->fw_pri_buf, start_addr, bin_size);

    drm_dev_exit(idx);
}

/**
 * DOC: usbc_pd_fw
 * Reading from this file will retrieve the USB-C PD firmware version. Writing to
 * this file will trigger the update process.
 */
static DEVICE_ATTR(usbc_pd_fw, 0644,
                   psp_usbc_pd_fw_sysfs_read,
                   psp_usbc_pd_fw_sysfs_write);
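/*
 * Usage sketch (illustrative only, not part of the driver): one way userspace
 * could exercise the usbc_pd_fw attribute defined above. The sysfs path and
 * card index are assumptions and depend on the system; the firmware name
 * written here is hypothetical and is resolved by the driver as
 * "amdgpu/<name>" under the regular firmware search path.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		const char *path = "/sys/class/drm/card0/device/usbc_pd_fw";
 *		char ver[32];
 *		FILE *f;
 *
 *		f = fopen(path, "r");		// read the current PD FW version
 *		if (f && fgets(ver, sizeof(ver), f))
 *			printf("USB-C PD FW version: %s", ver);
 *		if (f)
 *			fclose(f);
 *
 *		f = fopen(path, "w");		// write a FW name to start the update
 *		if (f) {
 *			fputs("example_usbc_pd.bin", f);	// hypothetical file name
 *			fclose(f);
 *		}
 *		return 0;
 *	}
 */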
int is_psp_fw_valid(struct psp_bin_desc bin)
{
    return bin.size_bytes;
}

static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
                                        struct bin_attribute *bin_attr,
                                        char *buffer, loff_t pos, size_t count)
{
    struct device *dev = kobj_to_dev(kobj);
    struct drm_device *ddev = dev_get_drvdata(dev);
    struct amdgpu_device *adev = drm_to_adev(ddev);

    adev->psp.vbflash_done = false;

    /* Safeguard against memory drain */
    if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
        dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
        kvfree(adev->psp.vbflash_tmp_buf);
        adev->psp.vbflash_tmp_buf = NULL;
        adev->psp.vbflash_image_size = 0;
        return -ENOMEM;
    }

    /* TODO Just allocate max for now and optimize to realloc later if needed */
    if (!adev->psp.vbflash_tmp_buf) {
        adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
        if (!adev->psp.vbflash_tmp_buf)
            return -ENOMEM;
    }

    mutex_lock(&adev->psp.mutex);
    memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
    adev->psp.vbflash_image_size += count;
    mutex_unlock(&adev->psp.mutex);

    dev_dbg(adev->dev, "IFWI staged for update\n");

    return count;
}

static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
                                       struct bin_attribute *bin_attr, char *buffer,
                                       loff_t pos, size_t count)
{
    struct device *dev = kobj_to_dev(kobj);
    struct drm_device *ddev = dev_get_drvdata(dev);
    struct amdgpu_device *adev = drm_to_adev(ddev);
    struct amdgpu_bo *fw_buf_bo = NULL;
    uint64_t fw_pri_mc_addr;
    void *fw_pri_cpu_addr;
    int ret;

    if (adev->psp.vbflash_image_size == 0)
        return -EINVAL;

    dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");

    ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
                                  AMDGPU_GPU_PAGE_SIZE,
                                  AMDGPU_GEM_DOMAIN_VRAM,
                                  &fw_buf_bo,
                                  &fw_pri_mc_addr,
                                  &fw_pri_cpu_addr);
    if (ret)
        goto rel_buf;

    memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);

    mutex_lock(&adev->psp.mutex);
    ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
    mutex_unlock(&adev->psp.mutex);

    amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);

rel_buf:
    kvfree(adev->psp.vbflash_tmp_buf);
    adev->psp.vbflash_tmp_buf = NULL;
    adev->psp.vbflash_image_size = 0;

    if (ret) {
        dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
        return ret;
    }

    dev_dbg(adev->dev, "PSP IFWI flash process done\n");
    return 0;
}

/**
 * DOC: psp_vbflash
 * Writing to this file will stage an IFWI for update. Reading from this file
 * will trigger the update process.
 */
static struct bin_attribute psp_vbflash_bin_attr = {
    .attr = {.name = "psp_vbflash", .mode = 0660},
    .size = 0,
    .write = amdgpu_psp_vbflash_write,
    .read = amdgpu_psp_vbflash_read,
};

/**
 * DOC: psp_vbflash_status
 * The status of the flash process.
 * 0: IFWI flash not complete.
 * 1: IFWI flash complete.
 */
static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
    struct drm_device *ddev = dev_get_drvdata(dev);
    struct amdgpu_device *adev = drm_to_adev(ddev);
    uint32_t vbflash_status;

    vbflash_status = psp_vbflash_status(&adev->psp);
    if (!adev->psp.vbflash_done)
        vbflash_status = 0;
    else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
        vbflash_status = 1;

    return sysfs_emit(buf, "0x%x\n", vbflash_status);
}
static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
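/*
 * Usage sketch (illustrative only, not part of the driver): the flash flow
 * implied by the psp_vbflash and psp_vbflash_status attributes above. The
 * sysfs paths, card index and IFWI image file name are assumptions.
 *
 *	#include <stdio.h>
 *
 *	#define SYSFS "/sys/class/drm/card0/device/"
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		size_t n;
 *		FILE *img = fopen("ifwi_image.bin", "rb");	// hypothetical image
 *		FILE *vbf = fopen(SYSFS "psp_vbflash", "wb");
 *		FILE *sts;
 *
 *		if (!img || !vbf)
 *			return 1;
 *
 *		// Step 1: stage the IFWI by writing the image to psp_vbflash.
 *		while ((n = fread(buf, 1, sizeof(buf), img)) > 0)
 *			fwrite(buf, 1, n, vbf);
 *		fclose(img);
 *		fclose(vbf);
 *
 *		// Step 2: read psp_vbflash back to trigger the SPIROM update.
 *		vbf = fopen(SYSFS "psp_vbflash", "rb");
 *		if (vbf) {
 *			fread(buf, 1, sizeof(buf), vbf);
 *			fclose(vbf);
 *		}
 *
 *		// Step 3: poll psp_vbflash_status; 1 means the flash completed.
 *		sts = fopen(SYSFS "psp_vbflash_status", "r");
 *		if (sts && fgets(buf, sizeof(buf), sts))
 *			printf("flash status: %s", buf);
 *		if (sts)
 *			fclose(sts);
 *		return 0;
 *	}
 */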
static struct bin_attribute *bin_flash_attrs[] = {
    &psp_vbflash_bin_attr,
    NULL
};

static struct attribute *flash_attrs[] = {
    &dev_attr_psp_vbflash_status.attr,
    &dev_attr_usbc_pd_fw.attr,
    NULL
};

static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
    struct device *dev = kobj_to_dev(kobj);
    struct drm_device *ddev = dev_get_drvdata(dev);
    struct amdgpu_device *adev = drm_to_adev(ddev);

    if (attr == &dev_attr_usbc_pd_fw.attr)
        return adev->psp.sup_pd_fw_up ? 0660 : 0;

    return adev->psp.sup_ifwi_up ? 0440 : 0;
}

static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
                                                const struct bin_attribute *attr,
                                                int idx)
{
    struct device *dev = kobj_to_dev(kobj);
    struct drm_device *ddev = dev_get_drvdata(dev);
    struct amdgpu_device *adev = drm_to_adev(ddev);

    return adev->psp.sup_ifwi_up ? 0660 : 0;
}

const struct attribute_group amdgpu_flash_attr_group = {
    .attrs = flash_attrs,
    .bin_attrs = bin_flash_attrs,
    .is_bin_visible = amdgpu_bin_flash_attr_is_visible,
    .is_visible = amdgpu_flash_attr_is_visible,
};

const struct amd_ip_funcs psp_ip_funcs = {
    .name = "psp",
    .early_init = psp_early_init,
    .sw_init = psp_sw_init,
    .sw_fini = psp_sw_fini,
    .hw_init = psp_hw_init,
    .hw_fini = psp_hw_fini,
    .suspend = psp_suspend,
    .resume = psp_resume,
    .set_clockgating_state = psp_set_clockgating_state,
    .set_powergating_state = psp_set_powergating_state,
};

const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
    .type = AMD_IP_BLOCK_TYPE_PSP,
    .major = 3,
    .minor = 1,
    .rev = 0,
    .funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
    .type = AMD_IP_BLOCK_TYPE_PSP,
    .major = 10,
    .minor = 0,
    .rev = 0,
    .funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
    .type = AMD_IP_BLOCK_TYPE_PSP,
    .major = 11,
    .minor = 0,
    .rev = 0,
    .funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
    .type = AMD_IP_BLOCK_TYPE_PSP,
    .major = 11,
    .minor = 0,
    .rev = 8,
    .funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
    .type = AMD_IP_BLOCK_TYPE_PSP,
    .major = 12,
    .minor = 0,
    .rev = 0,
    .funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
    .type = AMD_IP_BLOCK_TYPE_PSP,
    .major = 13,
    .minor = 0,
    .rev = 0,
    .funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
    .type = AMD_IP_BLOCK_TYPE_PSP,
    .major = 13,
    .minor = 0,
    .rev = 4,
    .funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
    .type = AMD_IP_BLOCK_TYPE_PSP,
    .major = 14,
    .minor = 0,
    .rev = 0,
    .funcs = &psp_ip_funcs,
};