1 /* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Author: Huang Rui 23 * 24 */ 25 26 #include <linux/firmware.h> 27 #include <linux/dma-mapping.h> 28 29 #include "amdgpu.h" 30 #include "amdgpu_psp.h" 31 #include "amdgpu_ucode.h" 32 #include "soc15_common.h" 33 #include "psp_v3_1.h" 34 #include "psp_v10_0.h" 35 #include "psp_v11_0.h" 36 #include "psp_v12_0.h" 37 38 #include "amdgpu_ras.h" 39 40 static int psp_sysfs_init(struct amdgpu_device *adev); 41 static void psp_sysfs_fini(struct amdgpu_device *adev); 42 43 static int psp_load_smu_fw(struct psp_context *psp); 44 45 /* 46 * Due to DF Cstate management centralized to PMFW, the firmware 47 * loading sequence will be updated as below: 48 * - Load KDB 49 * - Load SYS_DRV 50 * - Load tOS 51 * - Load PMFW 52 * - Setup TMR 53 * - Load other non-psp fw 54 * - Load ASD 55 * - Load XGMI/RAS/HDCP/DTM TA if any 56 * 57 * This new sequence is required for 58 * - Arcturus 59 * - Navi12 and onwards 60 */ 61 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp) 62 { 63 struct amdgpu_device *adev = psp->adev; 64 65 psp->pmfw_centralized_cstate_management = false; 66 67 if (amdgpu_sriov_vf(adev)) 68 return; 69 70 if (adev->flags & AMD_IS_APU) 71 return; 72 73 if ((adev->asic_type == CHIP_ARCTURUS) || 74 (adev->asic_type >= CHIP_NAVI12)) 75 psp->pmfw_centralized_cstate_management = true; 76 } 77 78 static int psp_early_init(void *handle) 79 { 80 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 81 struct psp_context *psp = &adev->psp; 82 83 switch (adev->asic_type) { 84 case CHIP_VEGA10: 85 case CHIP_VEGA12: 86 psp_v3_1_set_psp_funcs(psp); 87 psp->autoload_supported = false; 88 break; 89 case CHIP_RAVEN: 90 psp_v10_0_set_psp_funcs(psp); 91 psp->autoload_supported = false; 92 break; 93 case CHIP_VEGA20: 94 case CHIP_ARCTURUS: 95 psp_v11_0_set_psp_funcs(psp); 96 psp->autoload_supported = false; 97 break; 98 case CHIP_NAVI10: 99 case CHIP_NAVI14: 100 case CHIP_NAVI12: 101 psp_v11_0_set_psp_funcs(psp); 102 psp->autoload_supported = true; 103 break; 104 case CHIP_RENOIR: 105 psp_v12_0_set_psp_funcs(psp); 106 break; 107 default: 108 return -EINVAL; 109 } 110 111 psp->adev = adev; 112 113 psp_check_pmfw_centralized_cstate_management(psp); 114 115 return 0; 116 } 117 118 static int psp_sw_init(void *handle) 119 { 120 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 121 struct psp_context *psp = &adev->psp; 122 int ret; 123 124 ret = psp_init_microcode(psp); 125 if (ret) { 126 DRM_ERROR("Failed to load psp firmware!\n"); 127 return ret; 128 } 129 130 ret = psp_mem_training_init(psp); 131 if (ret) { 132 DRM_ERROR("Failed to initialize memory training!\n"); 133 return ret; 134 } 135 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); 136 if (ret) { 137 DRM_ERROR("Failed to process memory training!\n"); 138 return ret; 139 } 140 141 if (adev->asic_type == CHIP_NAVI10) { 142 ret= psp_sysfs_init(adev); 143 if (ret) { 144 return ret; 145 } 146 } 147 148 return 0; 149 } 150 151 static int psp_sw_fini(void *handle) 152 { 153 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 154 155 psp_mem_training_fini(&adev->psp); 156 release_firmware(adev->psp.sos_fw); 157 adev->psp.sos_fw = NULL; 158 release_firmware(adev->psp.asd_fw); 159 adev->psp.asd_fw = NULL; 160 if (adev->psp.ta_fw) { 161 release_firmware(adev->psp.ta_fw); 162 adev->psp.ta_fw = NULL; 163 } 164 165 if (adev->asic_type == CHIP_NAVI10) 166 psp_sysfs_fini(adev); 167 168 return 0; 169 } 170 171 int psp_wait_for(struct psp_context *psp, uint32_t reg_index, 172 uint32_t reg_val, uint32_t mask, bool check_changed) 173 { 174 uint32_t val; 175 int i; 176 struct amdgpu_device *adev = psp->adev; 177 178 for (i = 0; i < adev->usec_timeout; i++) { 179 val = RREG32(reg_index); 180 if (check_changed) { 181 if (val != reg_val) 182 return 0; 183 } else { 184 if ((val & mask) == reg_val) 185 return 0; 186 } 187 udelay(1); 188 } 189 190 return -ETIME; 191 } 192 193 static int 194 psp_cmd_submit_buf(struct psp_context *psp, 195 struct amdgpu_firmware_info *ucode, 196 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr) 197 { 198 int ret; 199 int index; 200 int timeout = 2000; 201 bool ras_intr = false; 202 bool skip_unsupport = false; 203 204 mutex_lock(&psp->mutex); 205 206 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); 207 208 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); 209 210 index = atomic_inc_return(&psp->fence_value); 211 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index); 212 if (ret) { 213 atomic_dec(&psp->fence_value); 214 mutex_unlock(&psp->mutex); 215 return ret; 216 } 217 218 amdgpu_asic_invalidate_hdp(psp->adev, NULL); 219 while (*((unsigned int *)psp->fence_buf) != index) { 220 if (--timeout == 0) 221 break; 222 /* 223 * Shouldn't wait for timeout when err_event_athub occurs, 224 * because gpu reset thread triggered and lock resource should 225 * be released for psp resume sequence. 226 */ 227 ras_intr = amdgpu_ras_intr_triggered(); 228 if (ras_intr) 229 break; 230 msleep(1); 231 amdgpu_asic_invalidate_hdp(psp->adev, NULL); 232 } 233 234 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command in SRIOV */ 235 skip_unsupport = (psp->cmd_buf_mem->resp.status == 0xffff000a) && amdgpu_sriov_vf(psp->adev); 236 237 /* In some cases, psp response status is not 0 even there is no 238 * problem while the command is submitted. Some version of PSP FW 239 * doesn't write 0 to that field. 240 * So here we would like to only print a warning instead of an error 241 * during psp initialization to avoid breaking hw_init and it doesn't 242 * return -EINVAL. 243 */ 244 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) { 245 if (ucode) 246 DRM_WARN("failed to load ucode id (%d) ", 247 ucode->ucode_id); 248 DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n", 249 psp->cmd_buf_mem->cmd_id, 250 psp->cmd_buf_mem->resp.status); 251 if (!timeout) { 252 mutex_unlock(&psp->mutex); 253 return -EINVAL; 254 } 255 } 256 257 /* get xGMI session id from response buffer */ 258 cmd->resp.session_id = psp->cmd_buf_mem->resp.session_id; 259 260 if (ucode) { 261 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo; 262 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi; 263 } 264 mutex_unlock(&psp->mutex); 265 266 return ret; 267 } 268 269 static void psp_prep_tmr_cmd_buf(struct psp_context *psp, 270 struct psp_gfx_cmd_resp *cmd, 271 uint64_t tmr_mc, uint32_t size) 272 { 273 if (amdgpu_sriov_vf(psp->adev)) 274 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR; 275 else 276 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR; 277 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc); 278 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc); 279 cmd->cmd.cmd_setup_tmr.buf_size = size; 280 } 281 282 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd, 283 uint64_t pri_buf_mc, uint32_t size) 284 { 285 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC; 286 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc); 287 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc); 288 cmd->cmd.cmd_load_toc.toc_size = size; 289 } 290 291 /* Issue LOAD TOC cmd to PSP to part toc and calculate tmr size needed */ 292 static int psp_load_toc(struct psp_context *psp, 293 uint32_t *tmr_size) 294 { 295 int ret; 296 struct psp_gfx_cmd_resp *cmd; 297 298 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 299 if (!cmd) 300 return -ENOMEM; 301 /* Copy toc to psp firmware private buffer */ 302 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 303 memcpy(psp->fw_pri_buf, psp->toc_start_addr, psp->toc_bin_size); 304 305 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc_bin_size); 306 307 ret = psp_cmd_submit_buf(psp, NULL, cmd, 308 psp->fence_buf_mc_addr); 309 if (!ret) 310 *tmr_size = psp->cmd_buf_mem->resp.tmr_size; 311 kfree(cmd); 312 return ret; 313 } 314 315 /* Set up Trusted Memory Region */ 316 static int psp_tmr_init(struct psp_context *psp) 317 { 318 int ret; 319 int tmr_size; 320 void *tmr_buf; 321 void **pptr; 322 323 /* 324 * According to HW engineer, they prefer the TMR address be "naturally 325 * aligned" , e.g. the start address be an integer divide of TMR size. 326 * 327 * Note: this memory need be reserved till the driver 328 * uninitializes. 329 */ 330 tmr_size = PSP_TMR_SIZE; 331 332 /* For ASICs support RLC autoload, psp will parse the toc 333 * and calculate the total size of TMR needed */ 334 if (!amdgpu_sriov_vf(psp->adev) && 335 psp->toc_start_addr && 336 psp->toc_bin_size && 337 psp->fw_pri_buf) { 338 ret = psp_load_toc(psp, &tmr_size); 339 if (ret) { 340 DRM_ERROR("Failed to load toc\n"); 341 return ret; 342 } 343 } 344 345 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; 346 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE, 347 AMDGPU_GEM_DOMAIN_VRAM, 348 &psp->tmr_bo, &psp->tmr_mc_addr, pptr); 349 350 return ret; 351 } 352 353 static int psp_tmr_load(struct psp_context *psp) 354 { 355 int ret; 356 struct psp_gfx_cmd_resp *cmd; 357 358 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 359 if (!cmd) 360 return -ENOMEM; 361 362 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, 363 amdgpu_bo_size(psp->tmr_bo)); 364 DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n", 365 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr); 366 367 ret = psp_cmd_submit_buf(psp, NULL, cmd, 368 psp->fence_buf_mc_addr); 369 370 kfree(cmd); 371 372 return ret; 373 } 374 375 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp, 376 struct psp_gfx_cmd_resp *cmd) 377 { 378 if (amdgpu_sriov_vf(psp->adev)) 379 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR; 380 else 381 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR; 382 } 383 384 static int psp_tmr_unload(struct psp_context *psp) 385 { 386 int ret; 387 struct psp_gfx_cmd_resp *cmd; 388 389 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 390 if (!cmd) 391 return -ENOMEM; 392 393 psp_prep_tmr_unload_cmd_buf(psp, cmd); 394 DRM_INFO("free PSP TMR buffer\n"); 395 396 ret = psp_cmd_submit_buf(psp, NULL, cmd, 397 psp->fence_buf_mc_addr); 398 399 kfree(cmd); 400 401 return ret; 402 } 403 404 static int psp_tmr_terminate(struct psp_context *psp) 405 { 406 int ret; 407 void *tmr_buf; 408 void **pptr; 409 410 ret = psp_tmr_unload(psp); 411 if (ret) 412 return ret; 413 414 /* free TMR memory buffer */ 415 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; 416 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr); 417 418 return 0; 419 } 420 421 static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 422 uint64_t asd_mc, uint32_t size) 423 { 424 cmd->cmd_id = GFX_CMD_ID_LOAD_ASD; 425 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc); 426 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc); 427 cmd->cmd.cmd_load_ta.app_len = size; 428 429 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0; 430 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0; 431 cmd->cmd.cmd_load_ta.cmd_buf_len = 0; 432 } 433 434 static int psp_asd_load(struct psp_context *psp) 435 { 436 int ret; 437 struct psp_gfx_cmd_resp *cmd; 438 439 /* If PSP version doesn't match ASD version, asd loading will be failed. 440 * add workaround to bypass it for sriov now. 441 * TODO: add version check to make it common 442 */ 443 if (amdgpu_sriov_vf(psp->adev)) 444 return 0; 445 446 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 447 if (!cmd) 448 return -ENOMEM; 449 450 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 451 memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size); 452 453 psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr, 454 psp->asd_ucode_size); 455 456 ret = psp_cmd_submit_buf(psp, NULL, cmd, 457 psp->fence_buf_mc_addr); 458 if (!ret) { 459 psp->asd_context.asd_initialized = true; 460 psp->asd_context.session_id = cmd->resp.session_id; 461 } 462 463 kfree(cmd); 464 465 return ret; 466 } 467 468 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, 469 uint32_t session_id) 470 { 471 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; 472 cmd->cmd.cmd_unload_ta.session_id = session_id; 473 } 474 475 static int psp_asd_unload(struct psp_context *psp) 476 { 477 int ret; 478 struct psp_gfx_cmd_resp *cmd; 479 480 if (amdgpu_sriov_vf(psp->adev)) 481 return 0; 482 483 if (!psp->asd_context.asd_initialized) 484 return 0; 485 486 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 487 if (!cmd) 488 return -ENOMEM; 489 490 psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id); 491 492 ret = psp_cmd_submit_buf(psp, NULL, cmd, 493 psp->fence_buf_mc_addr); 494 if (!ret) 495 psp->asd_context.asd_initialized = false; 496 497 kfree(cmd); 498 499 return ret; 500 } 501 502 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd, 503 uint32_t id, uint32_t value) 504 { 505 cmd->cmd_id = GFX_CMD_ID_PROG_REG; 506 cmd->cmd.cmd_setup_reg_prog.reg_value = value; 507 cmd->cmd.cmd_setup_reg_prog.reg_id = id; 508 } 509 510 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg, 511 uint32_t value) 512 { 513 struct psp_gfx_cmd_resp *cmd = NULL; 514 int ret = 0; 515 516 if (reg >= PSP_REG_LAST) 517 return -EINVAL; 518 519 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 520 if (!cmd) 521 return -ENOMEM; 522 523 psp_prep_reg_prog_cmd_buf(cmd, reg, value); 524 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 525 526 kfree(cmd); 527 return ret; 528 } 529 530 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 531 uint64_t ta_bin_mc, 532 uint32_t ta_bin_size, 533 uint64_t ta_shared_mc, 534 uint32_t ta_shared_size) 535 { 536 cmd->cmd_id = GFX_CMD_ID_LOAD_TA; 537 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc); 538 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc); 539 cmd->cmd.cmd_load_ta.app_len = ta_bin_size; 540 541 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc); 542 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc); 543 cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size; 544 } 545 546 static int psp_xgmi_init_shared_buf(struct psp_context *psp) 547 { 548 int ret; 549 550 /* 551 * Allocate 16k memory aligned to 4k from Frame Buffer (local 552 * physical) for xgmi ta <-> Driver 553 */ 554 ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE, 555 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 556 &psp->xgmi_context.xgmi_shared_bo, 557 &psp->xgmi_context.xgmi_shared_mc_addr, 558 &psp->xgmi_context.xgmi_shared_buf); 559 560 return ret; 561 } 562 563 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, 564 uint32_t ta_cmd_id, 565 uint32_t session_id) 566 { 567 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; 568 cmd->cmd.cmd_invoke_cmd.session_id = session_id; 569 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; 570 } 571 572 int psp_ta_invoke(struct psp_context *psp, 573 uint32_t ta_cmd_id, 574 uint32_t session_id) 575 { 576 int ret; 577 struct psp_gfx_cmd_resp *cmd; 578 579 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 580 if (!cmd) 581 return -ENOMEM; 582 583 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id); 584 585 ret = psp_cmd_submit_buf(psp, NULL, cmd, 586 psp->fence_buf_mc_addr); 587 588 kfree(cmd); 589 590 return ret; 591 } 592 593 static int psp_xgmi_load(struct psp_context *psp) 594 { 595 int ret; 596 struct psp_gfx_cmd_resp *cmd; 597 598 /* 599 * TODO: bypass the loading in sriov for now 600 */ 601 602 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 603 if (!cmd) 604 return -ENOMEM; 605 606 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 607 memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size); 608 609 psp_prep_ta_load_cmd_buf(cmd, 610 psp->fw_pri_mc_addr, 611 psp->ta_xgmi_ucode_size, 612 psp->xgmi_context.xgmi_shared_mc_addr, 613 PSP_XGMI_SHARED_MEM_SIZE); 614 615 ret = psp_cmd_submit_buf(psp, NULL, cmd, 616 psp->fence_buf_mc_addr); 617 618 if (!ret) { 619 psp->xgmi_context.initialized = 1; 620 psp->xgmi_context.session_id = cmd->resp.session_id; 621 } 622 623 kfree(cmd); 624 625 return ret; 626 } 627 628 static int psp_xgmi_unload(struct psp_context *psp) 629 { 630 int ret; 631 struct psp_gfx_cmd_resp *cmd; 632 struct amdgpu_device *adev = psp->adev; 633 634 /* XGMI TA unload currently is not supported on Arcturus */ 635 if (adev->asic_type == CHIP_ARCTURUS) 636 return 0; 637 638 /* 639 * TODO: bypass the unloading in sriov for now 640 */ 641 642 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 643 if (!cmd) 644 return -ENOMEM; 645 646 psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id); 647 648 ret = psp_cmd_submit_buf(psp, NULL, cmd, 649 psp->fence_buf_mc_addr); 650 651 kfree(cmd); 652 653 return ret; 654 } 655 656 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 657 { 658 return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id); 659 } 660 661 int psp_xgmi_terminate(struct psp_context *psp) 662 { 663 int ret; 664 665 if (!psp->xgmi_context.initialized) 666 return 0; 667 668 ret = psp_xgmi_unload(psp); 669 if (ret) 670 return ret; 671 672 psp->xgmi_context.initialized = 0; 673 674 /* free xgmi shared memory */ 675 amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo, 676 &psp->xgmi_context.xgmi_shared_mc_addr, 677 &psp->xgmi_context.xgmi_shared_buf); 678 679 return 0; 680 } 681 682 int psp_xgmi_initialize(struct psp_context *psp) 683 { 684 struct ta_xgmi_shared_memory *xgmi_cmd; 685 int ret; 686 687 if (!psp->adev->psp.ta_fw || 688 !psp->adev->psp.ta_xgmi_ucode_size || 689 !psp->adev->psp.ta_xgmi_start_addr) 690 return -ENOENT; 691 692 if (!psp->xgmi_context.initialized) { 693 ret = psp_xgmi_init_shared_buf(psp); 694 if (ret) 695 return ret; 696 } 697 698 /* Load XGMI TA */ 699 ret = psp_xgmi_load(psp); 700 if (ret) 701 return ret; 702 703 /* Initialize XGMI session */ 704 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf); 705 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 706 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE; 707 708 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 709 710 return ret; 711 } 712 713 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id) 714 { 715 struct ta_xgmi_shared_memory *xgmi_cmd; 716 int ret; 717 718 xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf; 719 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 720 721 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID; 722 723 /* Invoke xgmi ta to get hive id */ 724 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 725 if (ret) 726 return ret; 727 728 *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id; 729 730 return 0; 731 } 732 733 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id) 734 { 735 struct ta_xgmi_shared_memory *xgmi_cmd; 736 int ret; 737 738 xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf; 739 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 740 741 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID; 742 743 /* Invoke xgmi ta to get the node id */ 744 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 745 if (ret) 746 return ret; 747 748 *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id; 749 750 return 0; 751 } 752 753 int psp_xgmi_get_topology_info(struct psp_context *psp, 754 int number_devices, 755 struct psp_xgmi_topology_info *topology) 756 { 757 struct ta_xgmi_shared_memory *xgmi_cmd; 758 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; 759 struct ta_xgmi_cmd_get_topology_info_output *topology_info_output; 760 int i; 761 int ret; 762 763 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) 764 return -EINVAL; 765 766 xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf; 767 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 768 769 /* Fill in the shared memory with topology information as input */ 770 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; 771 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO; 772 topology_info_input->num_nodes = number_devices; 773 774 for (i = 0; i < topology_info_input->num_nodes; i++) { 775 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; 776 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; 777 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled; 778 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; 779 } 780 781 /* Invoke xgmi ta to get the topology information */ 782 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO); 783 if (ret) 784 return ret; 785 786 /* Read the output topology information from the shared memory */ 787 topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info; 788 topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes; 789 for (i = 0; i < topology->num_nodes; i++) { 790 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id; 791 topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops; 792 topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled; 793 topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine; 794 } 795 796 return 0; 797 } 798 799 int psp_xgmi_set_topology_info(struct psp_context *psp, 800 int number_devices, 801 struct psp_xgmi_topology_info *topology) 802 { 803 struct ta_xgmi_shared_memory *xgmi_cmd; 804 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; 805 int i; 806 807 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) 808 return -EINVAL; 809 810 xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf; 811 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 812 813 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; 814 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO; 815 topology_info_input->num_nodes = number_devices; 816 817 for (i = 0; i < topology_info_input->num_nodes; i++) { 818 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; 819 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; 820 topology_info_input->nodes[i].is_sharing_enabled = 1; 821 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; 822 } 823 824 /* Invoke xgmi ta to set topology information */ 825 return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO); 826 } 827 828 // ras begin 829 static int psp_ras_init_shared_buf(struct psp_context *psp) 830 { 831 int ret; 832 833 /* 834 * Allocate 16k memory aligned to 4k from Frame Buffer (local 835 * physical) for ras ta <-> Driver 836 */ 837 ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE, 838 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 839 &psp->ras.ras_shared_bo, 840 &psp->ras.ras_shared_mc_addr, 841 &psp->ras.ras_shared_buf); 842 843 return ret; 844 } 845 846 static int psp_ras_load(struct psp_context *psp) 847 { 848 int ret; 849 struct psp_gfx_cmd_resp *cmd; 850 851 /* 852 * TODO: bypass the loading in sriov for now 853 */ 854 if (amdgpu_sriov_vf(psp->adev)) 855 return 0; 856 857 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 858 if (!cmd) 859 return -ENOMEM; 860 861 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 862 memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size); 863 864 psp_prep_ta_load_cmd_buf(cmd, 865 psp->fw_pri_mc_addr, 866 psp->ta_ras_ucode_size, 867 psp->ras.ras_shared_mc_addr, 868 PSP_RAS_SHARED_MEM_SIZE); 869 870 ret = psp_cmd_submit_buf(psp, NULL, cmd, 871 psp->fence_buf_mc_addr); 872 873 if (!ret) { 874 psp->ras.ras_initialized = true; 875 psp->ras.session_id = cmd->resp.session_id; 876 } 877 878 kfree(cmd); 879 880 return ret; 881 } 882 883 static int psp_ras_unload(struct psp_context *psp) 884 { 885 int ret; 886 struct psp_gfx_cmd_resp *cmd; 887 888 /* 889 * TODO: bypass the unloading in sriov for now 890 */ 891 if (amdgpu_sriov_vf(psp->adev)) 892 return 0; 893 894 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 895 if (!cmd) 896 return -ENOMEM; 897 898 psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id); 899 900 ret = psp_cmd_submit_buf(psp, NULL, cmd, 901 psp->fence_buf_mc_addr); 902 903 kfree(cmd); 904 905 return ret; 906 } 907 908 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 909 { 910 struct ta_ras_shared_memory *ras_cmd; 911 int ret; 912 913 ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf; 914 915 /* 916 * TODO: bypass the loading in sriov for now 917 */ 918 if (amdgpu_sriov_vf(psp->adev)) 919 return 0; 920 921 ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id); 922 923 if (amdgpu_ras_intr_triggered()) 924 return ret; 925 926 if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) 927 { 928 DRM_WARN("RAS: Unsupported Interface"); 929 return -EINVAL; 930 } 931 932 if (!ret) { 933 if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) { 934 dev_warn(psp->adev->dev, "ECC switch disabled\n"); 935 936 ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE; 937 } 938 else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag) 939 dev_warn(psp->adev->dev, 940 "RAS internal register access blocked\n"); 941 } 942 943 return ret; 944 } 945 946 int psp_ras_enable_features(struct psp_context *psp, 947 union ta_ras_cmd_input *info, bool enable) 948 { 949 struct ta_ras_shared_memory *ras_cmd; 950 int ret; 951 952 if (!psp->ras.ras_initialized) 953 return -EINVAL; 954 955 ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf; 956 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 957 958 if (enable) 959 ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES; 960 else 961 ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES; 962 963 ras_cmd->ras_in_message = *info; 964 965 ret = psp_ras_invoke(psp, ras_cmd->cmd_id); 966 if (ret) 967 return -EINVAL; 968 969 return ras_cmd->ras_status; 970 } 971 972 static int psp_ras_terminate(struct psp_context *psp) 973 { 974 int ret; 975 976 /* 977 * TODO: bypass the terminate in sriov for now 978 */ 979 if (amdgpu_sriov_vf(psp->adev)) 980 return 0; 981 982 if (!psp->ras.ras_initialized) 983 return 0; 984 985 ret = psp_ras_unload(psp); 986 if (ret) 987 return ret; 988 989 psp->ras.ras_initialized = false; 990 991 /* free ras shared memory */ 992 amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo, 993 &psp->ras.ras_shared_mc_addr, 994 &psp->ras.ras_shared_buf); 995 996 return 0; 997 } 998 999 static int psp_ras_initialize(struct psp_context *psp) 1000 { 1001 int ret; 1002 1003 /* 1004 * TODO: bypass the initialize in sriov for now 1005 */ 1006 if (amdgpu_sriov_vf(psp->adev)) 1007 return 0; 1008 1009 if (!psp->adev->psp.ta_ras_ucode_size || 1010 !psp->adev->psp.ta_ras_start_addr) { 1011 dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n"); 1012 return 0; 1013 } 1014 1015 if (!psp->ras.ras_initialized) { 1016 ret = psp_ras_init_shared_buf(psp); 1017 if (ret) 1018 return ret; 1019 } 1020 1021 ret = psp_ras_load(psp); 1022 if (ret) 1023 return ret; 1024 1025 return 0; 1026 } 1027 1028 int psp_ras_trigger_error(struct psp_context *psp, 1029 struct ta_ras_trigger_error_input *info) 1030 { 1031 struct ta_ras_shared_memory *ras_cmd; 1032 int ret; 1033 1034 if (!psp->ras.ras_initialized) 1035 return -EINVAL; 1036 1037 ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf; 1038 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1039 1040 ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR; 1041 ras_cmd->ras_in_message.trigger_error = *info; 1042 1043 ret = psp_ras_invoke(psp, ras_cmd->cmd_id); 1044 if (ret) 1045 return -EINVAL; 1046 1047 /* If err_event_athub occurs error inject was successful, however 1048 return status from TA is no long reliable */ 1049 if (amdgpu_ras_intr_triggered()) 1050 return 0; 1051 1052 return ras_cmd->ras_status; 1053 } 1054 // ras end 1055 1056 // HDCP start 1057 static int psp_hdcp_init_shared_buf(struct psp_context *psp) 1058 { 1059 int ret; 1060 1061 /* 1062 * Allocate 16k memory aligned to 4k from Frame Buffer (local 1063 * physical) for hdcp ta <-> Driver 1064 */ 1065 ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE, 1066 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 1067 &psp->hdcp_context.hdcp_shared_bo, 1068 &psp->hdcp_context.hdcp_shared_mc_addr, 1069 &psp->hdcp_context.hdcp_shared_buf); 1070 1071 return ret; 1072 } 1073 1074 static int psp_hdcp_load(struct psp_context *psp) 1075 { 1076 int ret; 1077 struct psp_gfx_cmd_resp *cmd; 1078 1079 /* 1080 * TODO: bypass the loading in sriov for now 1081 */ 1082 if (amdgpu_sriov_vf(psp->adev)) 1083 return 0; 1084 1085 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1086 if (!cmd) 1087 return -ENOMEM; 1088 1089 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 1090 memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr, 1091 psp->ta_hdcp_ucode_size); 1092 1093 psp_prep_ta_load_cmd_buf(cmd, 1094 psp->fw_pri_mc_addr, 1095 psp->ta_hdcp_ucode_size, 1096 psp->hdcp_context.hdcp_shared_mc_addr, 1097 PSP_HDCP_SHARED_MEM_SIZE); 1098 1099 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1100 1101 if (!ret) { 1102 psp->hdcp_context.hdcp_initialized = true; 1103 psp->hdcp_context.session_id = cmd->resp.session_id; 1104 mutex_init(&psp->hdcp_context.mutex); 1105 } 1106 1107 kfree(cmd); 1108 1109 return ret; 1110 } 1111 static int psp_hdcp_initialize(struct psp_context *psp) 1112 { 1113 int ret; 1114 1115 /* 1116 * TODO: bypass the initialize in sriov for now 1117 */ 1118 if (amdgpu_sriov_vf(psp->adev)) 1119 return 0; 1120 1121 if (!psp->adev->psp.ta_hdcp_ucode_size || 1122 !psp->adev->psp.ta_hdcp_start_addr) { 1123 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n"); 1124 return 0; 1125 } 1126 1127 if (!psp->hdcp_context.hdcp_initialized) { 1128 ret = psp_hdcp_init_shared_buf(psp); 1129 if (ret) 1130 return ret; 1131 } 1132 1133 ret = psp_hdcp_load(psp); 1134 if (ret) 1135 return ret; 1136 1137 return 0; 1138 } 1139 1140 static int psp_hdcp_unload(struct psp_context *psp) 1141 { 1142 int ret; 1143 struct psp_gfx_cmd_resp *cmd; 1144 1145 /* 1146 * TODO: bypass the unloading in sriov for now 1147 */ 1148 if (amdgpu_sriov_vf(psp->adev)) 1149 return 0; 1150 1151 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1152 if (!cmd) 1153 return -ENOMEM; 1154 1155 psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id); 1156 1157 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1158 1159 kfree(cmd); 1160 1161 return ret; 1162 } 1163 1164 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1165 { 1166 /* 1167 * TODO: bypass the loading in sriov for now 1168 */ 1169 if (amdgpu_sriov_vf(psp->adev)) 1170 return 0; 1171 1172 return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id); 1173 } 1174 1175 static int psp_hdcp_terminate(struct psp_context *psp) 1176 { 1177 int ret; 1178 1179 /* 1180 * TODO: bypass the terminate in sriov for now 1181 */ 1182 if (amdgpu_sriov_vf(psp->adev)) 1183 return 0; 1184 1185 if (!psp->hdcp_context.hdcp_initialized) 1186 return 0; 1187 1188 ret = psp_hdcp_unload(psp); 1189 if (ret) 1190 return ret; 1191 1192 psp->hdcp_context.hdcp_initialized = false; 1193 1194 /* free hdcp shared memory */ 1195 amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo, 1196 &psp->hdcp_context.hdcp_shared_mc_addr, 1197 &psp->hdcp_context.hdcp_shared_buf); 1198 1199 return 0; 1200 } 1201 // HDCP end 1202 1203 // DTM start 1204 static int psp_dtm_init_shared_buf(struct psp_context *psp) 1205 { 1206 int ret; 1207 1208 /* 1209 * Allocate 16k memory aligned to 4k from Frame Buffer (local 1210 * physical) for dtm ta <-> Driver 1211 */ 1212 ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE, 1213 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 1214 &psp->dtm_context.dtm_shared_bo, 1215 &psp->dtm_context.dtm_shared_mc_addr, 1216 &psp->dtm_context.dtm_shared_buf); 1217 1218 return ret; 1219 } 1220 1221 static int psp_dtm_load(struct psp_context *psp) 1222 { 1223 int ret; 1224 struct psp_gfx_cmd_resp *cmd; 1225 1226 /* 1227 * TODO: bypass the loading in sriov for now 1228 */ 1229 if (amdgpu_sriov_vf(psp->adev)) 1230 return 0; 1231 1232 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1233 if (!cmd) 1234 return -ENOMEM; 1235 1236 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 1237 memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size); 1238 1239 psp_prep_ta_load_cmd_buf(cmd, 1240 psp->fw_pri_mc_addr, 1241 psp->ta_dtm_ucode_size, 1242 psp->dtm_context.dtm_shared_mc_addr, 1243 PSP_DTM_SHARED_MEM_SIZE); 1244 1245 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1246 1247 if (!ret) { 1248 psp->dtm_context.dtm_initialized = true; 1249 psp->dtm_context.session_id = cmd->resp.session_id; 1250 mutex_init(&psp->dtm_context.mutex); 1251 } 1252 1253 kfree(cmd); 1254 1255 return ret; 1256 } 1257 1258 static int psp_dtm_initialize(struct psp_context *psp) 1259 { 1260 int ret; 1261 1262 /* 1263 * TODO: bypass the initialize in sriov for now 1264 */ 1265 if (amdgpu_sriov_vf(psp->adev)) 1266 return 0; 1267 1268 if (!psp->adev->psp.ta_dtm_ucode_size || 1269 !psp->adev->psp.ta_dtm_start_addr) { 1270 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n"); 1271 return 0; 1272 } 1273 1274 if (!psp->dtm_context.dtm_initialized) { 1275 ret = psp_dtm_init_shared_buf(psp); 1276 if (ret) 1277 return ret; 1278 } 1279 1280 ret = psp_dtm_load(psp); 1281 if (ret) 1282 return ret; 1283 1284 return 0; 1285 } 1286 1287 static int psp_dtm_unload(struct psp_context *psp) 1288 { 1289 int ret; 1290 struct psp_gfx_cmd_resp *cmd; 1291 1292 /* 1293 * TODO: bypass the unloading in sriov for now 1294 */ 1295 if (amdgpu_sriov_vf(psp->adev)) 1296 return 0; 1297 1298 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1299 if (!cmd) 1300 return -ENOMEM; 1301 1302 psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id); 1303 1304 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1305 1306 kfree(cmd); 1307 1308 return ret; 1309 } 1310 1311 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1312 { 1313 /* 1314 * TODO: bypass the loading in sriov for now 1315 */ 1316 if (amdgpu_sriov_vf(psp->adev)) 1317 return 0; 1318 1319 return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id); 1320 } 1321 1322 static int psp_dtm_terminate(struct psp_context *psp) 1323 { 1324 int ret; 1325 1326 /* 1327 * TODO: bypass the terminate in sriov for now 1328 */ 1329 if (amdgpu_sriov_vf(psp->adev)) 1330 return 0; 1331 1332 if (!psp->dtm_context.dtm_initialized) 1333 return 0; 1334 1335 ret = psp_dtm_unload(psp); 1336 if (ret) 1337 return ret; 1338 1339 psp->dtm_context.dtm_initialized = false; 1340 1341 /* free hdcp shared memory */ 1342 amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo, 1343 &psp->dtm_context.dtm_shared_mc_addr, 1344 &psp->dtm_context.dtm_shared_buf); 1345 1346 return 0; 1347 } 1348 // DTM end 1349 1350 static int psp_hw_start(struct psp_context *psp) 1351 { 1352 struct amdgpu_device *adev = psp->adev; 1353 int ret; 1354 1355 if (!amdgpu_sriov_vf(adev)) { 1356 if (psp->kdb_bin_size && 1357 (psp->funcs->bootloader_load_kdb != NULL)) { 1358 ret = psp_bootloader_load_kdb(psp); 1359 if (ret) { 1360 DRM_ERROR("PSP load kdb failed!\n"); 1361 return ret; 1362 } 1363 } 1364 1365 ret = psp_bootloader_load_sysdrv(psp); 1366 if (ret) { 1367 DRM_ERROR("PSP load sysdrv failed!\n"); 1368 return ret; 1369 } 1370 1371 ret = psp_bootloader_load_sos(psp); 1372 if (ret) { 1373 DRM_ERROR("PSP load sos failed!\n"); 1374 return ret; 1375 } 1376 } 1377 1378 ret = psp_ring_create(psp, PSP_RING_TYPE__KM); 1379 if (ret) { 1380 DRM_ERROR("PSP create ring failed!\n"); 1381 return ret; 1382 } 1383 1384 ret = psp_tmr_init(psp); 1385 if (ret) { 1386 DRM_ERROR("PSP tmr init failed!\n"); 1387 return ret; 1388 } 1389 1390 /* 1391 * For ASICs with DF Cstate management centralized 1392 * to PMFW, TMR setup should be performed after PMFW 1393 * loaded and before other non-psp firmware loaded. 1394 */ 1395 if (psp->pmfw_centralized_cstate_management) { 1396 ret = psp_load_smu_fw(psp); 1397 if (ret) 1398 return ret; 1399 } 1400 1401 ret = psp_tmr_load(psp); 1402 if (ret) { 1403 DRM_ERROR("PSP load tmr failed!\n"); 1404 return ret; 1405 } 1406 1407 return 0; 1408 } 1409 1410 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, 1411 enum psp_gfx_fw_type *type) 1412 { 1413 switch (ucode->ucode_id) { 1414 case AMDGPU_UCODE_ID_SDMA0: 1415 *type = GFX_FW_TYPE_SDMA0; 1416 break; 1417 case AMDGPU_UCODE_ID_SDMA1: 1418 *type = GFX_FW_TYPE_SDMA1; 1419 break; 1420 case AMDGPU_UCODE_ID_SDMA2: 1421 *type = GFX_FW_TYPE_SDMA2; 1422 break; 1423 case AMDGPU_UCODE_ID_SDMA3: 1424 *type = GFX_FW_TYPE_SDMA3; 1425 break; 1426 case AMDGPU_UCODE_ID_SDMA4: 1427 *type = GFX_FW_TYPE_SDMA4; 1428 break; 1429 case AMDGPU_UCODE_ID_SDMA5: 1430 *type = GFX_FW_TYPE_SDMA5; 1431 break; 1432 case AMDGPU_UCODE_ID_SDMA6: 1433 *type = GFX_FW_TYPE_SDMA6; 1434 break; 1435 case AMDGPU_UCODE_ID_SDMA7: 1436 *type = GFX_FW_TYPE_SDMA7; 1437 break; 1438 case AMDGPU_UCODE_ID_CP_CE: 1439 *type = GFX_FW_TYPE_CP_CE; 1440 break; 1441 case AMDGPU_UCODE_ID_CP_PFP: 1442 *type = GFX_FW_TYPE_CP_PFP; 1443 break; 1444 case AMDGPU_UCODE_ID_CP_ME: 1445 *type = GFX_FW_TYPE_CP_ME; 1446 break; 1447 case AMDGPU_UCODE_ID_CP_MEC1: 1448 *type = GFX_FW_TYPE_CP_MEC; 1449 break; 1450 case AMDGPU_UCODE_ID_CP_MEC1_JT: 1451 *type = GFX_FW_TYPE_CP_MEC_ME1; 1452 break; 1453 case AMDGPU_UCODE_ID_CP_MEC2: 1454 *type = GFX_FW_TYPE_CP_MEC; 1455 break; 1456 case AMDGPU_UCODE_ID_CP_MEC2_JT: 1457 *type = GFX_FW_TYPE_CP_MEC_ME2; 1458 break; 1459 case AMDGPU_UCODE_ID_RLC_G: 1460 *type = GFX_FW_TYPE_RLC_G; 1461 break; 1462 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: 1463 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL; 1464 break; 1465 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: 1466 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM; 1467 break; 1468 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: 1469 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; 1470 break; 1471 case AMDGPU_UCODE_ID_SMC: 1472 *type = GFX_FW_TYPE_SMU; 1473 break; 1474 case AMDGPU_UCODE_ID_UVD: 1475 *type = GFX_FW_TYPE_UVD; 1476 break; 1477 case AMDGPU_UCODE_ID_UVD1: 1478 *type = GFX_FW_TYPE_UVD1; 1479 break; 1480 case AMDGPU_UCODE_ID_VCE: 1481 *type = GFX_FW_TYPE_VCE; 1482 break; 1483 case AMDGPU_UCODE_ID_VCN: 1484 *type = GFX_FW_TYPE_VCN; 1485 break; 1486 case AMDGPU_UCODE_ID_VCN1: 1487 *type = GFX_FW_TYPE_VCN1; 1488 break; 1489 case AMDGPU_UCODE_ID_DMCU_ERAM: 1490 *type = GFX_FW_TYPE_DMCU_ERAM; 1491 break; 1492 case AMDGPU_UCODE_ID_DMCU_INTV: 1493 *type = GFX_FW_TYPE_DMCU_ISR; 1494 break; 1495 case AMDGPU_UCODE_ID_VCN0_RAM: 1496 *type = GFX_FW_TYPE_VCN0_RAM; 1497 break; 1498 case AMDGPU_UCODE_ID_VCN1_RAM: 1499 *type = GFX_FW_TYPE_VCN1_RAM; 1500 break; 1501 case AMDGPU_UCODE_ID_DMCUB: 1502 *type = GFX_FW_TYPE_DMUB; 1503 break; 1504 case AMDGPU_UCODE_ID_MAXIMUM: 1505 default: 1506 return -EINVAL; 1507 } 1508 1509 return 0; 1510 } 1511 1512 static void psp_print_fw_hdr(struct psp_context *psp, 1513 struct amdgpu_firmware_info *ucode) 1514 { 1515 struct amdgpu_device *adev = psp->adev; 1516 struct common_firmware_header *hdr; 1517 1518 switch (ucode->ucode_id) { 1519 case AMDGPU_UCODE_ID_SDMA0: 1520 case AMDGPU_UCODE_ID_SDMA1: 1521 case AMDGPU_UCODE_ID_SDMA2: 1522 case AMDGPU_UCODE_ID_SDMA3: 1523 case AMDGPU_UCODE_ID_SDMA4: 1524 case AMDGPU_UCODE_ID_SDMA5: 1525 case AMDGPU_UCODE_ID_SDMA6: 1526 case AMDGPU_UCODE_ID_SDMA7: 1527 hdr = (struct common_firmware_header *) 1528 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; 1529 amdgpu_ucode_print_sdma_hdr(hdr); 1530 break; 1531 case AMDGPU_UCODE_ID_CP_CE: 1532 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data; 1533 amdgpu_ucode_print_gfx_hdr(hdr); 1534 break; 1535 case AMDGPU_UCODE_ID_CP_PFP: 1536 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data; 1537 amdgpu_ucode_print_gfx_hdr(hdr); 1538 break; 1539 case AMDGPU_UCODE_ID_CP_ME: 1540 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data; 1541 amdgpu_ucode_print_gfx_hdr(hdr); 1542 break; 1543 case AMDGPU_UCODE_ID_CP_MEC1: 1544 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data; 1545 amdgpu_ucode_print_gfx_hdr(hdr); 1546 break; 1547 case AMDGPU_UCODE_ID_RLC_G: 1548 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data; 1549 amdgpu_ucode_print_rlc_hdr(hdr); 1550 break; 1551 case AMDGPU_UCODE_ID_SMC: 1552 hdr = (struct common_firmware_header *)adev->pm.fw->data; 1553 amdgpu_ucode_print_smc_hdr(hdr); 1554 break; 1555 default: 1556 break; 1557 } 1558 } 1559 1560 static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode, 1561 struct psp_gfx_cmd_resp *cmd) 1562 { 1563 int ret; 1564 uint64_t fw_mem_mc_addr = ucode->mc_addr; 1565 1566 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); 1567 1568 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 1569 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); 1570 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); 1571 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; 1572 1573 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); 1574 if (ret) 1575 DRM_ERROR("Unknown firmware type\n"); 1576 1577 return ret; 1578 } 1579 1580 static int psp_execute_np_fw_load(struct psp_context *psp, 1581 struct amdgpu_firmware_info *ucode) 1582 { 1583 int ret = 0; 1584 1585 ret = psp_prep_load_ip_fw_cmd_buf(ucode, psp->cmd); 1586 if (ret) 1587 return ret; 1588 1589 ret = psp_cmd_submit_buf(psp, ucode, psp->cmd, 1590 psp->fence_buf_mc_addr); 1591 1592 return ret; 1593 } 1594 1595 static int psp_load_smu_fw(struct psp_context *psp) 1596 { 1597 int ret; 1598 struct amdgpu_device* adev = psp->adev; 1599 struct amdgpu_firmware_info *ucode = 1600 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 1601 struct amdgpu_ras *ras = psp->ras.ras; 1602 1603 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 1604 return 0; 1605 1606 1607 if (adev->in_gpu_reset && ras && ras->supported) { 1608 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); 1609 if (ret) { 1610 DRM_WARN("Failed to set MP1 state prepare for reload\n"); 1611 } 1612 } 1613 1614 ret = psp_execute_np_fw_load(psp, ucode); 1615 1616 if (ret) 1617 DRM_ERROR("PSP load smu failed!\n"); 1618 1619 return ret; 1620 } 1621 1622 static bool fw_load_skip_check(struct psp_context *psp, 1623 struct amdgpu_firmware_info *ucode) 1624 { 1625 if (!ucode->fw) 1626 return true; 1627 1628 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 1629 (psp_smu_reload_quirk(psp) || 1630 psp->autoload_supported || 1631 psp->pmfw_centralized_cstate_management)) 1632 return true; 1633 1634 if (amdgpu_sriov_vf(psp->adev) && 1635 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0 1636 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 1637 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 1638 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3 1639 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4 1640 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5 1641 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6 1642 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7 1643 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G 1644 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL 1645 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM 1646 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM 1647 || ucode->ucode_id == AMDGPU_UCODE_ID_SMC)) 1648 /*skip ucode loading in SRIOV VF */ 1649 return true; 1650 1651 if (psp->autoload_supported && 1652 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || 1653 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) 1654 /* skip mec JT when autoload is enabled */ 1655 return true; 1656 1657 return false; 1658 } 1659 1660 static int psp_np_fw_load(struct psp_context *psp) 1661 { 1662 int i, ret; 1663 struct amdgpu_firmware_info *ucode; 1664 struct amdgpu_device* adev = psp->adev; 1665 1666 if (psp->autoload_supported && 1667 !psp->pmfw_centralized_cstate_management) { 1668 ret = psp_load_smu_fw(psp); 1669 if (ret) 1670 return ret; 1671 } 1672 1673 for (i = 0; i < adev->firmware.max_ucodes; i++) { 1674 ucode = &adev->firmware.ucode[i]; 1675 1676 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 1677 !fw_load_skip_check(psp, ucode)) { 1678 ret = psp_load_smu_fw(psp); 1679 if (ret) 1680 return ret; 1681 continue; 1682 } 1683 1684 if (fw_load_skip_check(psp, ucode)) 1685 continue; 1686 1687 psp_print_fw_hdr(psp, ucode); 1688 1689 ret = psp_execute_np_fw_load(psp, ucode); 1690 if (ret) 1691 return ret; 1692 1693 /* Start rlc autoload after psp recieved all the gfx firmware */ 1694 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ? 1695 AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) { 1696 ret = psp_rlc_autoload_start(psp); 1697 if (ret) { 1698 DRM_ERROR("Failed to start rlc autoload\n"); 1699 return ret; 1700 } 1701 } 1702 } 1703 1704 return 0; 1705 } 1706 1707 static int psp_load_fw(struct amdgpu_device *adev) 1708 { 1709 int ret; 1710 struct psp_context *psp = &adev->psp; 1711 1712 if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) { 1713 psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */ 1714 goto skip_memalloc; 1715 } 1716 1717 psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1718 if (!psp->cmd) 1719 return -ENOMEM; 1720 1721 ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, 1722 AMDGPU_GEM_DOMAIN_GTT, 1723 &psp->fw_pri_bo, 1724 &psp->fw_pri_mc_addr, 1725 &psp->fw_pri_buf); 1726 if (ret) 1727 goto failed; 1728 1729 ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE, 1730 AMDGPU_GEM_DOMAIN_VRAM, 1731 &psp->fence_buf_bo, 1732 &psp->fence_buf_mc_addr, 1733 &psp->fence_buf); 1734 if (ret) 1735 goto failed; 1736 1737 ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE, 1738 AMDGPU_GEM_DOMAIN_VRAM, 1739 &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, 1740 (void **)&psp->cmd_buf_mem); 1741 if (ret) 1742 goto failed; 1743 1744 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE); 1745 1746 ret = psp_ring_init(psp, PSP_RING_TYPE__KM); 1747 if (ret) { 1748 DRM_ERROR("PSP ring init failed!\n"); 1749 goto failed; 1750 } 1751 1752 skip_memalloc: 1753 ret = psp_hw_start(psp); 1754 if (ret) 1755 goto failed; 1756 1757 ret = psp_np_fw_load(psp); 1758 if (ret) 1759 goto failed; 1760 1761 ret = psp_asd_load(psp); 1762 if (ret) { 1763 DRM_ERROR("PSP load asd failed!\n"); 1764 return ret; 1765 } 1766 1767 if (psp->adev->psp.ta_fw) { 1768 ret = psp_ras_initialize(psp); 1769 if (ret) 1770 dev_err(psp->adev->dev, 1771 "RAS: Failed to initialize RAS\n"); 1772 1773 ret = psp_hdcp_initialize(psp); 1774 if (ret) 1775 dev_err(psp->adev->dev, 1776 "HDCP: Failed to initialize HDCP\n"); 1777 1778 ret = psp_dtm_initialize(psp); 1779 if (ret) 1780 dev_err(psp->adev->dev, 1781 "DTM: Failed to initialize DTM\n"); 1782 } 1783 1784 return 0; 1785 1786 failed: 1787 /* 1788 * all cleanup jobs (xgmi terminate, ras terminate, 1789 * ring destroy, cmd/fence/fw buffers destory, 1790 * psp->cmd destory) are delayed to psp_hw_fini 1791 */ 1792 return ret; 1793 } 1794 1795 static int psp_hw_init(void *handle) 1796 { 1797 int ret; 1798 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1799 1800 mutex_lock(&adev->firmware.mutex); 1801 /* 1802 * This sequence is just used on hw_init only once, no need on 1803 * resume. 1804 */ 1805 ret = amdgpu_ucode_init_bo(adev); 1806 if (ret) 1807 goto failed; 1808 1809 ret = psp_load_fw(adev); 1810 if (ret) { 1811 DRM_ERROR("PSP firmware loading failed\n"); 1812 goto failed; 1813 } 1814 1815 mutex_unlock(&adev->firmware.mutex); 1816 return 0; 1817 1818 failed: 1819 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT; 1820 mutex_unlock(&adev->firmware.mutex); 1821 return -EINVAL; 1822 } 1823 1824 static int psp_hw_fini(void *handle) 1825 { 1826 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1827 struct psp_context *psp = &adev->psp; 1828 1829 if (psp->adev->psp.ta_fw) { 1830 psp_ras_terminate(psp); 1831 psp_dtm_terminate(psp); 1832 psp_hdcp_terminate(psp); 1833 } 1834 1835 psp_asd_unload(psp); 1836 1837 psp_tmr_terminate(psp); 1838 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 1839 1840 amdgpu_bo_free_kernel(&psp->fw_pri_bo, 1841 &psp->fw_pri_mc_addr, &psp->fw_pri_buf); 1842 amdgpu_bo_free_kernel(&psp->fence_buf_bo, 1843 &psp->fence_buf_mc_addr, &psp->fence_buf); 1844 amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, 1845 (void **)&psp->cmd_buf_mem); 1846 1847 kfree(psp->cmd); 1848 psp->cmd = NULL; 1849 1850 return 0; 1851 } 1852 1853 static int psp_suspend(void *handle) 1854 { 1855 int ret; 1856 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1857 struct psp_context *psp = &adev->psp; 1858 1859 if (adev->gmc.xgmi.num_physical_nodes > 1 && 1860 psp->xgmi_context.initialized == 1) { 1861 ret = psp_xgmi_terminate(psp); 1862 if (ret) { 1863 DRM_ERROR("Failed to terminate xgmi ta\n"); 1864 return ret; 1865 } 1866 } 1867 1868 if (psp->adev->psp.ta_fw) { 1869 ret = psp_ras_terminate(psp); 1870 if (ret) { 1871 DRM_ERROR("Failed to terminate ras ta\n"); 1872 return ret; 1873 } 1874 ret = psp_hdcp_terminate(psp); 1875 if (ret) { 1876 DRM_ERROR("Failed to terminate hdcp ta\n"); 1877 return ret; 1878 } 1879 ret = psp_dtm_terminate(psp); 1880 if (ret) { 1881 DRM_ERROR("Failed to terminate dtm ta\n"); 1882 return ret; 1883 } 1884 } 1885 1886 ret = psp_asd_unload(psp); 1887 if (ret) { 1888 DRM_ERROR("Failed to unload asd\n"); 1889 return ret; 1890 } 1891 1892 ret = psp_tmr_terminate(psp); 1893 if (ret) { 1894 DRM_ERROR("Falied to terminate tmr\n"); 1895 return ret; 1896 } 1897 1898 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); 1899 if (ret) { 1900 DRM_ERROR("PSP ring stop failed\n"); 1901 return ret; 1902 } 1903 1904 return 0; 1905 } 1906 1907 static int psp_resume(void *handle) 1908 { 1909 int ret; 1910 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1911 struct psp_context *psp = &adev->psp; 1912 1913 DRM_INFO("PSP is resuming...\n"); 1914 1915 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); 1916 if (ret) { 1917 DRM_ERROR("Failed to process memory training!\n"); 1918 return ret; 1919 } 1920 1921 mutex_lock(&adev->firmware.mutex); 1922 1923 ret = psp_hw_start(psp); 1924 if (ret) 1925 goto failed; 1926 1927 ret = psp_np_fw_load(psp); 1928 if (ret) 1929 goto failed; 1930 1931 ret = psp_asd_load(psp); 1932 if (ret) { 1933 DRM_ERROR("PSP load asd failed!\n"); 1934 goto failed; 1935 } 1936 1937 if (adev->gmc.xgmi.num_physical_nodes > 1) { 1938 ret = psp_xgmi_initialize(psp); 1939 /* Warning the XGMI seesion initialize failure 1940 * Instead of stop driver initialization 1941 */ 1942 if (ret) 1943 dev_err(psp->adev->dev, 1944 "XGMI: Failed to initialize XGMI session\n"); 1945 } 1946 1947 if (psp->adev->psp.ta_fw) { 1948 ret = psp_ras_initialize(psp); 1949 if (ret) 1950 dev_err(psp->adev->dev, 1951 "RAS: Failed to initialize RAS\n"); 1952 1953 ret = psp_hdcp_initialize(psp); 1954 if (ret) 1955 dev_err(psp->adev->dev, 1956 "HDCP: Failed to initialize HDCP\n"); 1957 1958 ret = psp_dtm_initialize(psp); 1959 if (ret) 1960 dev_err(psp->adev->dev, 1961 "DTM: Failed to initialize DTM\n"); 1962 } 1963 1964 mutex_unlock(&adev->firmware.mutex); 1965 1966 return 0; 1967 1968 failed: 1969 DRM_ERROR("PSP resume failed\n"); 1970 mutex_unlock(&adev->firmware.mutex); 1971 return ret; 1972 } 1973 1974 int psp_gpu_reset(struct amdgpu_device *adev) 1975 { 1976 int ret; 1977 1978 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 1979 return 0; 1980 1981 mutex_lock(&adev->psp.mutex); 1982 ret = psp_mode1_reset(&adev->psp); 1983 mutex_unlock(&adev->psp.mutex); 1984 1985 return ret; 1986 } 1987 1988 int psp_rlc_autoload_start(struct psp_context *psp) 1989 { 1990 int ret; 1991 struct psp_gfx_cmd_resp *cmd; 1992 1993 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1994 if (!cmd) 1995 return -ENOMEM; 1996 1997 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC; 1998 1999 ret = psp_cmd_submit_buf(psp, NULL, cmd, 2000 psp->fence_buf_mc_addr); 2001 kfree(cmd); 2002 return ret; 2003 } 2004 2005 int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx, 2006 uint64_t cmd_gpu_addr, int cmd_size) 2007 { 2008 struct amdgpu_firmware_info ucode = {0}; 2009 2010 ucode.ucode_id = inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM : 2011 AMDGPU_UCODE_ID_VCN0_RAM; 2012 ucode.mc_addr = cmd_gpu_addr; 2013 ucode.ucode_size = cmd_size; 2014 2015 return psp_execute_np_fw_load(&adev->psp, &ucode); 2016 } 2017 2018 int psp_ring_cmd_submit(struct psp_context *psp, 2019 uint64_t cmd_buf_mc_addr, 2020 uint64_t fence_mc_addr, 2021 int index) 2022 { 2023 unsigned int psp_write_ptr_reg = 0; 2024 struct psp_gfx_rb_frame *write_frame; 2025 struct psp_ring *ring = &psp->km_ring; 2026 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; 2027 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + 2028 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; 2029 struct amdgpu_device *adev = psp->adev; 2030 uint32_t ring_size_dw = ring->ring_size / 4; 2031 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; 2032 2033 /* KM (GPCOM) prepare write pointer */ 2034 psp_write_ptr_reg = psp_ring_get_wptr(psp); 2035 2036 /* Update KM RB frame pointer to new frame */ 2037 /* write_frame ptr increments by size of rb_frame in bytes */ 2038 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ 2039 if ((psp_write_ptr_reg % ring_size_dw) == 0) 2040 write_frame = ring_buffer_start; 2041 else 2042 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); 2043 /* Check invalid write_frame ptr address */ 2044 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { 2045 DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", 2046 ring_buffer_start, ring_buffer_end, write_frame); 2047 DRM_ERROR("write_frame is pointing to address out of bounds\n"); 2048 return -EINVAL; 2049 } 2050 2051 /* Initialize KM RB frame */ 2052 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); 2053 2054 /* Update KM RB frame */ 2055 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); 2056 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); 2057 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); 2058 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); 2059 write_frame->fence_value = index; 2060 amdgpu_asic_flush_hdp(adev, NULL); 2061 2062 /* Update the write Pointer in DWORDs */ 2063 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; 2064 psp_ring_set_wptr(psp, psp_write_ptr_reg); 2065 return 0; 2066 } 2067 2068 int psp_init_asd_microcode(struct psp_context *psp, 2069 const char *chip_name) 2070 { 2071 struct amdgpu_device *adev = psp->adev; 2072 char fw_name[30]; 2073 const struct psp_firmware_header_v1_0 *asd_hdr; 2074 int err = 0; 2075 2076 if (!chip_name) { 2077 dev_err(adev->dev, "invalid chip name for asd microcode\n"); 2078 return -EINVAL; 2079 } 2080 2081 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); 2082 err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev); 2083 if (err) 2084 goto out; 2085 2086 err = amdgpu_ucode_validate(adev->psp.asd_fw); 2087 if (err) 2088 goto out; 2089 2090 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; 2091 adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version); 2092 adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version); 2093 adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes); 2094 adev->psp.asd_start_addr = (uint8_t *)asd_hdr + 2095 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); 2096 return 0; 2097 out: 2098 dev_err(adev->dev, "fail to initialize asd microcode\n"); 2099 release_firmware(adev->psp.asd_fw); 2100 adev->psp.asd_fw = NULL; 2101 return err; 2102 } 2103 2104 int psp_init_sos_microcode(struct psp_context *psp, 2105 const char *chip_name) 2106 { 2107 struct amdgpu_device *adev = psp->adev; 2108 char fw_name[30]; 2109 const struct psp_firmware_header_v1_0 *sos_hdr; 2110 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1; 2111 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; 2112 int err = 0; 2113 2114 if (!chip_name) { 2115 dev_err(adev->dev, "invalid chip name for sos microcode\n"); 2116 return -EINVAL; 2117 } 2118 2119 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name); 2120 err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev); 2121 if (err) 2122 goto out; 2123 2124 err = amdgpu_ucode_validate(adev->psp.sos_fw); 2125 if (err) 2126 goto out; 2127 2128 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 2129 amdgpu_ucode_print_psp_hdr(&sos_hdr->header); 2130 2131 switch (sos_hdr->header.header_version_major) { 2132 case 1: 2133 adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version); 2134 adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version); 2135 adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes); 2136 adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes); 2137 adev->psp.sys_start_addr = (uint8_t *)sos_hdr + 2138 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 2139 adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr + 2140 le32_to_cpu(sos_hdr->sos_offset_bytes); 2141 if (sos_hdr->header.header_version_minor == 1) { 2142 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; 2143 adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes); 2144 adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr + 2145 le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes); 2146 adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes); 2147 adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr + 2148 le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes); 2149 } 2150 if (sos_hdr->header.header_version_minor == 2) { 2151 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; 2152 adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes); 2153 adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr + 2154 le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes); 2155 } 2156 break; 2157 default: 2158 dev_err(adev->dev, 2159 "unsupported psp sos firmware\n"); 2160 err = -EINVAL; 2161 goto out; 2162 } 2163 2164 return 0; 2165 out: 2166 dev_err(adev->dev, 2167 "failed to init sos firmware\n"); 2168 release_firmware(adev->psp.sos_fw); 2169 adev->psp.sos_fw = NULL; 2170 2171 return err; 2172 } 2173 2174 static int psp_set_clockgating_state(void *handle, 2175 enum amd_clockgating_state state) 2176 { 2177 return 0; 2178 } 2179 2180 static int psp_set_powergating_state(void *handle, 2181 enum amd_powergating_state state) 2182 { 2183 return 0; 2184 } 2185 2186 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, 2187 struct device_attribute *attr, 2188 char *buf) 2189 { 2190 struct drm_device *ddev = dev_get_drvdata(dev); 2191 struct amdgpu_device *adev = ddev->dev_private; 2192 uint32_t fw_ver; 2193 int ret; 2194 2195 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 2196 DRM_INFO("PSP block is not ready yet."); 2197 return -EBUSY; 2198 } 2199 2200 mutex_lock(&adev->psp.mutex); 2201 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver); 2202 mutex_unlock(&adev->psp.mutex); 2203 2204 if (ret) { 2205 DRM_ERROR("Failed to read USBC PD FW, err = %d", ret); 2206 return ret; 2207 } 2208 2209 return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver); 2210 } 2211 2212 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, 2213 struct device_attribute *attr, 2214 const char *buf, 2215 size_t count) 2216 { 2217 struct drm_device *ddev = dev_get_drvdata(dev); 2218 struct amdgpu_device *adev = ddev->dev_private; 2219 void *cpu_addr; 2220 dma_addr_t dma_addr; 2221 int ret; 2222 char fw_name[100]; 2223 const struct firmware *usbc_pd_fw; 2224 2225 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 2226 DRM_INFO("PSP block is not ready yet."); 2227 return -EBUSY; 2228 } 2229 2230 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf); 2231 ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev); 2232 if (ret) 2233 goto fail; 2234 2235 /* We need contiguous physical mem to place the FW for psp to access */ 2236 cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL); 2237 2238 ret = dma_mapping_error(adev->dev, dma_addr); 2239 if (ret) 2240 goto rel_buf; 2241 2242 memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size); 2243 2244 /* 2245 * x86 specific workaround. 2246 * Without it the buffer is invisible in PSP. 2247 * 2248 * TODO Remove once PSP starts snooping CPU cache 2249 */ 2250 #ifdef CONFIG_X86 2251 clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1))); 2252 #endif 2253 2254 mutex_lock(&adev->psp.mutex); 2255 ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr); 2256 mutex_unlock(&adev->psp.mutex); 2257 2258 rel_buf: 2259 dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr); 2260 release_firmware(usbc_pd_fw); 2261 2262 fail: 2263 if (ret) { 2264 DRM_ERROR("Failed to load USBC PD FW, err = %d", ret); 2265 return ret; 2266 } 2267 2268 return count; 2269 } 2270 2271 static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR, 2272 psp_usbc_pd_fw_sysfs_read, 2273 psp_usbc_pd_fw_sysfs_write); 2274 2275 2276 2277 const struct amd_ip_funcs psp_ip_funcs = { 2278 .name = "psp", 2279 .early_init = psp_early_init, 2280 .late_init = NULL, 2281 .sw_init = psp_sw_init, 2282 .sw_fini = psp_sw_fini, 2283 .hw_init = psp_hw_init, 2284 .hw_fini = psp_hw_fini, 2285 .suspend = psp_suspend, 2286 .resume = psp_resume, 2287 .is_idle = NULL, 2288 .check_soft_reset = NULL, 2289 .wait_for_idle = NULL, 2290 .soft_reset = NULL, 2291 .set_clockgating_state = psp_set_clockgating_state, 2292 .set_powergating_state = psp_set_powergating_state, 2293 }; 2294 2295 static int psp_sysfs_init(struct amdgpu_device *adev) 2296 { 2297 int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw); 2298 2299 if (ret) 2300 DRM_ERROR("Failed to create USBC PD FW control file!"); 2301 2302 return ret; 2303 } 2304 2305 static void psp_sysfs_fini(struct amdgpu_device *adev) 2306 { 2307 device_remove_file(adev->dev, &dev_attr_usbc_pd_fw); 2308 } 2309 2310 const struct amdgpu_ip_block_version psp_v3_1_ip_block = 2311 { 2312 .type = AMD_IP_BLOCK_TYPE_PSP, 2313 .major = 3, 2314 .minor = 1, 2315 .rev = 0, 2316 .funcs = &psp_ip_funcs, 2317 }; 2318 2319 const struct amdgpu_ip_block_version psp_v10_0_ip_block = 2320 { 2321 .type = AMD_IP_BLOCK_TYPE_PSP, 2322 .major = 10, 2323 .minor = 0, 2324 .rev = 0, 2325 .funcs = &psp_ip_funcs, 2326 }; 2327 2328 const struct amdgpu_ip_block_version psp_v11_0_ip_block = 2329 { 2330 .type = AMD_IP_BLOCK_TYPE_PSP, 2331 .major = 11, 2332 .minor = 0, 2333 .rev = 0, 2334 .funcs = &psp_ip_funcs, 2335 }; 2336 2337 const struct amdgpu_ip_block_version psp_v12_0_ip_block = 2338 { 2339 .type = AMD_IP_BLOCK_TYPE_PSP, 2340 .major = 12, 2341 .minor = 0, 2342 .rev = 0, 2343 .funcs = &psp_ip_funcs, 2344 }; 2345