/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "vangogh_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

static const struct amd_pm_funcs swsmu_pm_funcs;
static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask);

int smu_sys_get_pp_feature_mask(void *handle, char *buf)
{
	struct smu_context *smu = handle;
	int size = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	size = smu_get_pp_feature_mask(smu, buf);

	mutex_unlock(&smu->mutex);

	return size;
}

int smu_sys_set_pp_feature_mask(void *handle, uint64_t new_mask)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_pp_feature_mask(smu, new_mask);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status)
		*value = smu_get_gfx_off_status(smu);
	else
		ret = -EINVAL;

	return ret;
}

int smu_set_soft_freq_range(struct smu_context *smu,
			    enum smu_clk_type clk_type,
			    uint32_t min,
			    uint32_t max)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_soft_freq_limited_range)
		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
								  clk_type,
								  min,
								  max);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{
	int ret = 0;

	if (!min && !max)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_dpm_ultimate_freq)
		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
							    clk_type,
							    min,
							    max);

	mutex_unlock(&smu->mutex);

	return ret;
}

u32 smu_get_mclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

u32 smu_get_sclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
					 bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

	/* nothing to do if VCN is already in the requested state */
	if (atomic_read(&power_gate->vcn_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vcn_gated, !enable);

	return ret;
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	mutex_lock(&power_gate->vcn_gate_lock);

	ret = smu_dpm_set_vcn_enable_locked(smu, enable);

	mutex_unlock(&power_gate->vcn_gate_lock);

	return ret;
}

static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
					  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

	/* nothing to do if JPEG is already in the requested state */
	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

	return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	mutex_lock(&power_gate->jpeg_gate_lock);

	ret = smu_dpm_set_jpeg_enable_locked(smu, enable);

	mutex_unlock(&power_gate->jpeg_gate_lock);

	return ret;
}
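
/*
 * Typical call path into the helpers above (sketch): other IP blocks do not
 * call the VCN/JPEG helpers directly but go through smu_dpm_set_power_gate()
 * below via the powerplay handle, e.g. (the exact wrapper name may differ
 * between kernel versions):
 *
 *	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, false);
 *
 * which ungates VCN through smu_dpm_set_vcn_enable().
 */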

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @handle: smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate: to power gate if true, ungate otherwise
 *
 * This API takes no smu->mutex lock protection because:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce), and the
 *    callers guarantee the calls are free of race conditions.
 * 2. Or it is called on a user request to change
 *    power_dpm_force_performance_level; in that case the smu->mutex lock is
 *    already held by the parent API smu_force_performance_level in the call
 *    path.
 */
int smu_dpm_set_power_gate(void *handle, uint32_t block_type,
			   bool gate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	switch (block_type) {
	/*
	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
	 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
	 */
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
				gate ? "enable" : "disable");
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
				gate ? "gate" : "ungate");
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		return -EINVAL;
	}

	return ret;
}

/**
 * smu_set_user_clk_dependencies - set user profile clock dependencies
 *
 * @smu: smu_context pointer
 * @clk: enum smu_clk_type type
 *
 * Enable/Disable the clock dependency for the @clk type.
 */
static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
{
	if (smu->adev->in_suspend)
		return;

	/*
	 * mclk, fclk and socclk are interdependent
	 * on each other
	 */
	if (clk == SMU_MCLK) {
		/* reset clock dependency */
		smu->user_dpm_profile.clk_dependency = 0;
		/* set mclk dependent clocks (fclk and socclk) */
		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_FCLK) {
		/* give priority to mclk, if mclk dependent clocks are set */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		/* reset clock dependency */
		smu->user_dpm_profile.clk_dependency = 0;
		/* set fclk dependent clocks (mclk and socclk) */
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_SOCCLK) {
		/* give priority to mclk, if mclk dependent clocks are set */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		/* reset clock dependency */
		smu->user_dpm_profile.clk_dependency = 0;
		/* set socclk dependent clocks (mclk and fclk) */
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
	} else
		/* add clk dependencies here, if any */
		return;
}
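
/*
 * Example of the dependency tracking above (sketch): if the user forces an
 * MCLK level, clk_dependency becomes BIT(SMU_FCLK) | BIT(SMU_SOCCLK), so any
 * saved user FCLK/SOCCLK masks are skipped by smu_restore_dpm_user_profile()
 * below; MCLK keeps priority until the dependency is cleared again (e.g. when
 * smu_force_performance_level() leaves manual mode).
 */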

/**
 * smu_restore_dpm_user_profile - reinstate user dpm profile
 *
 * @smu: smu_context pointer
 *
 * Restore the saved user power configurations, including power limit,
 * clock frequencies, fan control mode and fan speed.
 */
static void smu_restore_dpm_user_profile(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->adev->in_suspend)
		return;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return;

	/* Enable restore flag */
	smu->user_dpm_profile.flags = SMU_DPM_USER_PROFILE_RESTORE;

	/* set the user dpm power limit */
	if (smu->user_dpm_profile.power_limit) {
		ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
		if (ret)
			dev_err(smu->adev->dev, "Failed to set power limit value\n");
	}

	/* set the user dpm clock configurations */
	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		enum smu_clk_type clk_type;

		for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
			/*
			 * Iterate over smu clk type and force the saved user clk
			 * configs, skip if clock dependency is enabled
			 */
			if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
			    smu->user_dpm_profile.clk_mask[clk_type]) {
				ret = smu_force_smuclk_levels(smu, clk_type,
							      smu->user_dpm_profile.clk_mask[clk_type]);
				if (ret)
					dev_err(smu->adev->dev, "Failed to set clock type = %d\n",
						clk_type);
			}
		}
	}

	/* set the user dpm fan configurations */
	if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL) {
		ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
			return;
		}

		if (!ret && smu->user_dpm_profile.fan_speed_percent) {
			ret = smu_set_fan_speed_percent(smu, smu->user_dpm_profile.fan_speed_percent);
			if (ret)
				dev_err(smu->adev->dev, "Failed to set manual fan speed\n");
		}
	}

	/* Disable restore flag */
	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
}

int smu_get_power_num_states(void *handle,
			     struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported; report a single default state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_ARCTURUS)
		return true;

	return false;
}

bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (!is_support_sw_smu(adev))
		return false;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
		return false;

	return true;
}

int smu_sys_get_pp_table(void *handle, char **table)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t powerplay_table_size;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	powerplay_table_size = smu_table->power_play_table_size;

	mutex_unlock(&smu->mutex);

	return powerplay_table_size;
}
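
/*
 * Note on the custom pptable path below (sketch): smu_sys_set_pp_table()
 * copies the user supplied table and then bounces the SMU through a full
 * smu_reset() (hw_fini/hw_init/late_init) so the new table takes effect. The
 * uploading_custom_pp_table flag it sets is checked in smu_disable_dpms()
 * later in this file to skip DPM feature disablement on Navi1x-class ASICs
 * during that reset.
 */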

int smu_sys_set_pp_table(void *handle, const char *buf, size_t size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size not matched!\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * Special hw_fini action (for Navi1x, the DPMs disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		navi10_set_ppt_funcs(smu);
		break;
	case CHIP_ARCTURUS:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case CHIP_RENOIR:
		renoir_set_ppt_funcs(smu);
		break;
	case CHIP_VANGOGH:
		vangogh_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	mutex_init(&smu->mutex);
	mutex_init(&smu->smu_baco.mutex);
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;

	adev->powerplay.pp_handle = smu;
	adev->powerplay.pp_funcs = &swsmu_pm_funcs;

	return smu_set_funcs(adev);
}

static int smu_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int vcn_gate, jpeg_gate;
	int ret = 0;

	if (!smu->ppt_funcs->set_default_dpm_table)
		return 0;

	mutex_lock(&power_gate->vcn_gate_lock);
	mutex_lock(&power_gate->jpeg_gate_lock);

	vcn_gate = atomic_read(&power_gate->vcn_gated);
	jpeg_gate = atomic_read(&power_gate->jpeg_gated);

	ret = smu_dpm_set_vcn_enable_locked(smu, true);
	if (ret)
		goto err0_out;

	ret = smu_dpm_set_jpeg_enable_locked(smu, true);
	if (ret)
		goto err1_out;

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate);
err1_out:
	smu_dpm_set_vcn_enable_locked(smu, !vcn_gate);
err0_out:
	mutex_unlock(&power_gate->jpeg_gate_lock);
	mutex_unlock(&power_gate->vcn_gate_lock);

	return ret;
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	smu_set_fine_grain_gfx_freq_parameters(smu);

	if (!smu->pm_enabled)
		return 0;

	ret = smu_post_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to post smu init!\n");
		return ret;
	}

	ret = smu_set_default_od_settings(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default OD settings!\n");
		return ret;
	}

	ret = smu_populate_umd_state_clk(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
		return ret;
	}

	ret = smu_get_asic_power_limits(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;
	}

	smu_get_unique_id(smu);

	smu_get_fan_parameters(smu);

	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT,
			false);

	smu_restore_dpm_user_profile(smu);

	return 0;
}

static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;
	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}
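
/*
 * Allocation strategy used above: all SMU tables except PMSTATUSLOG share a
 * single "driver table" BO in VRAM, sized to the largest individual table,
 * and table transfers reuse that one buffer. PMSTATUSLOG (the tool table) is
 * kept in its own BO and is reported to the firmware separately via
 * SetToolsDramAddr (see smu_set_tool_table_location() in smu_smc_hw_setup()).
 */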

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is for SMC use. Its location is passed to the SMC with
 * the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		if (ret)
			dev_err(adev->dev, "Memory allocation for dramlog failed!\n");
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}

static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	dummy_read_1_table->size = 0x40000;
	dummy_read_1_table->align = PAGE_SIZE;
	dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      dummy_read_1_table->size,
				      dummy_read_1_table->align,
				      dummy_read_1_table->domain,
				      &dummy_read_1_table->bo,
				      &dummy_read_1_table->mc_address,
				      &dummy_read_1_table->cpu_addr);
	if (ret)
		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

	return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;

	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
			      &dummy_read_1_table->mc_address,
			      &dummy_read_1_table->cpu_addr);

	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}
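
/*
 * Note: only the pool sizes handled in smu_alloc_memory_pool() above (256 MB,
 * 512 MB, 1 GB, 2 GB) result in an actual GTT allocation; any other non-zero
 * smu->pool_size quietly allocates nothing. pool_size itself comes from
 * adev->pm.smu_prv_buffer_size (see smu_sw_init() below), which is normally
 * derived from the amdgpu "smu_memory_pool_size" module parameter.
 */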

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/**
	 * Create smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
		return ret;
	}

	/**
	 * Create the smu_power_context structure, and allocate the
	 * smu_dpm_context and the context storage used to fill the
	 * smu_power_context data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu power!\n");
		return ret;
	}

	/*
	 * allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_alloc_dummy_read_table(smu);
	if (ret)
		return ret;

	ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c);
	if (ret)
		return ret;

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	smu_i2c_fini(smu, &smu->adev->pm.smu_i2c);

	smu_free_dummy_read_table(smu);

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smu power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}

static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}

static void smu_interrupt_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       interrupt_work);

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
		smu->ppt_funcs->interrupt_work(smu);

	mutex_unlock(&smu->mutex);
}

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	mutex_init(&smu->sensor_lock);
	mutex_init(&smu->metrics_lock);
	mutex_init(&smu->message_lock);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
	atomic64_set(&smu->throttle_int_counter, 0);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
	mutex_init(&smu->smu_power.power_gate.vcn_gate_lock);
	mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock);

	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
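	/*
	 * How the workload bookkeeping above is consumed (sketch): workload_mask
	 * holds one bit per requested profile, indexed by workload_prority[].
	 * When a profile has to be picked (see smu_adjust_power_state_dynamic()
	 * and smu_switch_power_profile() below), fls() selects the highest set
	 * bit, i.e. the highest-priority active profile. For example, with both
	 * VIDEO (priority 3) and COMPUTE (priority 5) requested, bit 5 wins and
	 * workload_setting[5] (COMPUTE) is applied.
	 */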
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	ret = smu_init_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to register smc irq handler!\n");
		return ret;
	}

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw fini smc table!\n");
		return ret;
	}

	smu_fini_microcode(smu);

	return 0;
}

static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_temperature_range *range =
				&smu->thermal_range;
	int ret = 0;

	if (!smu->ppt_funcs->get_thermal_temperature_range)
		return 0;

	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
	if (ret)
		return ret;

	adev->pm.dpm.thermal.min_temp = range->min;
	adev->pm.dpm.thermal.max_temp = range->max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

	return ret;
}

static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0;
	int ret = 0;

	if (adev->in_suspend && smu_is_dpm_running(smu)) {
		dev_info(adev->dev, "dpm has been enabled\n");
		/* these ASICs additionally need the SMU features re-enabled here */
		if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
		    (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
			ret = smu_system_features_control(smu, true);
		return ret;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret) {
		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
		return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
		return ret;
	}

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
		return ret;
	}

	/*
	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages to
	 * notify the SMC of the memory pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	/* smu_dump_pptable(smu); */
	/*
	 * Copy pptable bo in the vram to smc with SMU MSGs such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
		return ret;
	}

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	ret = smu_feature_set_allowed_mask(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
		return ret;
	}

	ret = smu_system_features_control(smu, true);
	if (ret) {
		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
		return ret;
	}

	if (!smu_is_dpm_running(smu))
		dev_info(adev->dev, "dpm has been disabled\n");

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;
	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
	if (ret) {
		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
		return ret;
	}
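
	/*
	 * Worked example for the selection above (sketch): on a Gen4-capable,
	 * x16-wide link the checks pick pcie_gen = 3 and pcie_width = 6, which
	 * smu_update_pcie_parameters() hands to the ASIC-specific ppt code as
	 * the platform's upper bounds for the PCIe DPM table.
	 */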

	ret = smu_get_thermal_temperature_range(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
		return ret;
	}

	ret = smu_enable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to enable thermal alert!\n");
		return ret;
	}

	/*
	 * Set initialized values (obtained from vbios) to the dpm tables
	 * context, such as gfxclk, memclk, dcefclk, etc., and enable the DPM
	 * feature for each type of clock.
	 */
	ret = smu_set_default_dpm_table(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
		return ret;
	}

	ret = smu_notify_display_change(smu);
	if (ret)
		return ret;

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu,
					  smu->smu_table.boot_values.dcefclk / 100);
	if (ret)
		return ret;

	return ret;
}

static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->asic_type < CHIP_NAVI10) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	/*
	 * Send msg GetDriverIfVersion to check if the return value is equal
	 * with DRIVER_IF_VERSION of smc header.
	 */
	ret = smu_check_fw_version(smu);
	if (ret)
		return ret;

	return ret;
}

static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
		smu->pm_enabled = false;
		return 0;
	}

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, false);
		smu_dpm_set_vcn_enable(smu, true);
		smu_dpm_set_jpeg_enable(smu, true);
		smu_set_gfx_cgpg(&adev->smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	/* get boot_values from vbios to set revision, gfxclk, and etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_setup_pptable(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable!\n");
		return ret;
	}

	ret = smu_get_driver_allowed_feature_mask(smu);
	if (ret)
		return ret;

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	/*
	 * Move maximum sustainable clock retrieving here considering
	 * 1. It is not needed on resume (from S3).
	 * 2. DAL settings come between .hw_init and .late_init of SMU.
	 *    And DAL needs to know the maximum sustainable clocks. Thus
	 *    it cannot be put in .late_init().
	 */
	ret = smu_init_max_sustainable_clocks(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
		return ret;
	}

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is initialized successfully!\n");

	return 0;
}

static int smu_disable_dpms(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
	bool use_baco = !smu->is_apu &&
		((amdgpu_in_reset(adev) &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));

	/*
	 * For custom pptable uploading, skip the DPM features
	 * disable process on Navi1x ASICs.
	 *   - The gfx related features are under control of
	 *     RLC on those ASICs. RLC reinitialization would be
	 *     needed to reenable them, which would take considerably
	 *     more effort.
	 *
	 *   - SMU firmware can handle the DPM reenablement
	 *     properly.
	 */
	if (smu->uploading_custom_pp_table &&
	    (adev->asic_type >= CHIP_NAVI10) &&
	    (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
		return 0;

	/*
	 * For Sienna_Cichlid, PMFW will handle the features disablement properly
	 * on BACO in. Driver involvement is unnecessary.
	 */
	if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
	    use_baco)
		return 0;

	/*
	 * For gpu reset, runpm and hibernation through BACO,
	 * BACO feature has to be kept enabled.
	 */
	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
		ret = smu_disable_all_features_with_exception(smu,
							      SMU_FEATURE_BACO_BIT);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
	} else {
		ret = smu_system_features_control(smu, false);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features.\n");
	}

	if (adev->asic_type >= CHIP_NAVI10 &&
	    adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);

	return ret;
}

static int smu_smc_hw_cleanup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	cancel_work_sync(&smu->throttling_logging_work);
	cancel_work_sync(&smu->interrupt_work);

	ret = smu_disable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable thermal alert!\n");
		return ret;
	}

	ret = smu_disable_dpms(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable dpm features!\n");
		return ret;
	}

	return 0;
}

static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, true);
		smu_dpm_set_vcn_enable(smu, false);
		smu_dpm_set_jpeg_enable(smu, false);
	}

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	return smu_smc_hw_cleanup(smu);
}

int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	amdgpu_gfx_off_ctrl(smu->adev, false);

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	ret = smu_late_init(adev);
	if (ret)
		return ret;

	amdgpu_gfx_off_ctrl(smu->adev, true);

	return 0;
}

static int smu_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, false);

	return 0;
}

static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	dev_info(adev->dev, "SMU is resuming...\n");

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, true);

	smu->disable_uclk_switch = 0;

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is resumed successfully!\n");

	return 0;
}

int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_set_min_dcef_deep_sleep(smu,
				    display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	mutex_unlock(&smu->mutex);

	return 0;
}

static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context *)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			smu_gpo_control(smu, false);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
			smu_gfx_ulv_control(smu, false);
			smu_deep_sleep_control(smu, false);
			amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
			smu_deep_sleep_control(smu, true);
			smu_gfx_ulv_control(smu, true);
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
			smu_gpo_control(smu, true);
		}
	}

	return 0;
}

static int smu_bump_power_profile_mode(struct smu_context *smu,
				       long *param,
				       uint32_t param_size)
{
	int ret = 0;

	if (smu->ppt_funcs->set_power_profile_mode)
		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

	return ret;
}

static int smu_adjust_power_state_dynamic(struct smu_context *smu,
					  enum amd_dpm_forced_level level,
					  bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to change display config!");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_display_config(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to notify smc display config!");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to set performance level!");
			return ret;
		}

		/* update the saved copy */
		smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_bump_power_profile_mode(smu, &workload, 0);
	}

	return ret;
}

int smu_handle_task(struct smu_context *smu,
		    enum amd_dpm_forced_level level,
		    enum amd_pp_task task_id,
		    bool lock_needed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			goto out;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

out:
	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}

int smu_handle_dpm_task(void *handle,
			enum amd_pp_task task_id,
			enum amd_pm_state_type *user_state)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	return smu_handle_task(smu, smu_dpm->dpm_level, task_id, true);
}

int smu_switch_power_profile(void *handle,
			     enum PP_SMC_POWER_PROFILE type,
			     bool en)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	long workload;
	uint32_t index;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (!en) {
		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	} else {
		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		smu_bump_power_profile_mode(smu, &workload, 0);

	mutex_unlock(&smu->mutex);

	return 0;
}

enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	enum amd_dpm_forced_level level;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&(smu->mutex));
	level = smu_dpm_ctx->dpm_level;
	mutex_unlock(&(smu->mutex));

	return level;
}

int smu_force_performance_level(void *handle, enum amd_dpm_forced_level level)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	ret = smu_enable_umd_pstate(smu, &level);
	if (ret) {
		mutex_unlock(&smu->mutex);
		return ret;
	}

	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE,
			      false);

	mutex_unlock(&smu->mutex);

	/* reset user dpm clock state */
	if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
		smu->user_dpm_profile.clk_dependency = 0;
	}

	return ret;
}

int smu_set_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);
	ret = smu_init_display_count(smu, count);
	mutex_unlock(&smu->mutex);

	return ret;
}

static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
		if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE) {
			smu->user_dpm_profile.clk_mask[clk_type] = mask;
			smu_set_user_clk_dependencies(smu, clk_type);
		}
	}

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_force_ppclk_levels(void *handle, enum pp_clock_type type, uint32_t mask)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	switch (type) {
	case PP_SCLK:
		clk_type = SMU_SCLK; break;
	case PP_MCLK:
		clk_type = SMU_MCLK; break;
	case PP_PCIE:
		clk_type = SMU_PCIE; break;
	case PP_SOCCLK:
		clk_type = SMU_SOCCLK; break;
	case PP_FCLK:
		clk_type = SMU_FCLK; break;
	case PP_DCEFCLK:
		clk_type = SMU_DCEFCLK; break;
	case PP_VCLK:
		clk_type = SMU_VCLK; break;
	case PP_DCLK:
		clk_type = SMU_DCLK; break;
	case OD_SCLK:
		clk_type = SMU_OD_SCLK; break;
	case OD_MCLK:
		clk_type = SMU_OD_MCLK; break;
	case OD_VDDC_CURVE:
		clk_type = SMU_OD_VDDC_CURVE; break;
	case OD_RANGE:
		clk_type = SMU_OD_RANGE; break;
	default:
		return -EINVAL;
	}

	return smu_force_smuclk_levels(smu, clk_type, mask);
}

/*
 * On system suspend or reset, the dpm_enabled flag is cleared so that SMU
 * services which are no longer supported are gated off. Setting the MP1
 * state, however, must still be allowed even with dpm_enabled cleared.
 */
int smu_set_mp1_state(void *handle,
		      enum pp_mp1_state mp1_state)
{
	struct smu_context *smu = handle;
	uint16_t msg;
	int ret;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		mutex_unlock(&smu->mutex);
		return 0;
	}

	ret = smu_send_smc_msg(smu, msg, NULL);
	/* some asics may not support those messages */
	if (ret == -EINVAL)
		ret = 0;
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_df_cstate(void *handle,
		      enum pp_df_cstate state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu->ppt_funcs->set_df_cstate(smu, state);
	if (ret)
		dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
	if (ret)
		dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_write_watermarks_table(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_watermarks_table(smu, NULL);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
					struct pp_smu_wm_range_sets *clock_ranges)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->disable_watermark)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu_set_watermarks_table(smu, clock_ranges);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_ac_dc(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	/* controlled by firmware */
	if (smu->dc_controlled_by_gpio)
		return 0;

	mutex_lock(&smu->mutex);
	ret = smu_set_power_source(smu,
				   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
				   SMU_POWER_SOURCE_DC);
	if (ret)
		dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
			smu->adev->pm.ac_power ? "AC" : "DC");
	mutex_unlock(&smu->mutex);

	return ret;
}

const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v12_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

int smu_load_microcode(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->load_microcode)
		ret = smu->ppt_funcs->load_microcode(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_check_fw_status(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->check_fw_status)
		ret = smu->ppt_funcs->check_fw_status(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_gfx_cgpg)
		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
{
	struct smu_context *smu = handle;
	u32 percent;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_speed_percent) {
		percent = speed * 100 / smu->fan_max_rpm;
		ret = smu->ppt_funcs->set_fan_speed_percent(smu, percent);
		if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
			smu->user_dpm_profile.fan_speed_percent = percent;
	}

	mutex_unlock(&smu->mutex);

	return ret;
}
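
/*
 * Note on the power limit interface below (sketch): callers pack the PPT
 * limit type into the top byte of the 32-bit limit value, so "limit >> 24"
 * recovers the type. Anything other than SMU_DEFAULT_PPT_LIMIT is handed
 * straight to the ASIC-specific get_ppt_limit()/set_power_limit() hooks;
 * only the default limit is range-checked against smu->max_power_limit here.
 */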
			ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
	} else {
		switch (limit_level) {
		case SMU_PPT_LIMIT_CURRENT:
			*limit = smu->current_power_limit;
			break;
		case SMU_PPT_LIMIT_MAX:
			*limit = smu->max_power_limit;
			break;
		default:
			break;
		}
	}

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_power_limit(void *handle, uint32_t limit)
{
	struct smu_context *smu = handle;
	uint32_t limit_type = limit >> 24;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
		if (smu->ppt_funcs->set_power_limit) {
			ret = smu->ppt_funcs->set_power_limit(smu, limit);
			goto out;
		}

	if (limit > smu->max_power_limit) {
		dev_err(smu->adev->dev,
			"New power limit (%d) is over the max allowed %d\n",
			limit, smu->max_power_limit);
		ret = -EINVAL;
		goto out;
	}

	if (!limit)
		limit = smu->current_power_limit;

	if (smu->ppt_funcs->set_power_limit) {
		ret = smu->ppt_funcs->set_power_limit(smu, limit);
		if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
			smu->user_dpm_profile.power_limit = limit;
	}

out:
	mutex_unlock(&smu->mutex);

	return ret;
}

static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->print_clk_levels)
		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_print_ppclk_levels(void *handle, enum pp_clock_type type, char *buf)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	switch (type) {
	case PP_SCLK:
		clk_type = SMU_SCLK; break;
	case PP_MCLK:
		clk_type = SMU_MCLK; break;
	case PP_PCIE:
		clk_type = SMU_PCIE; break;
	case PP_SOCCLK:
		clk_type = SMU_SOCCLK; break;
	case PP_FCLK:
		clk_type = SMU_FCLK; break;
	case PP_DCEFCLK:
		clk_type = SMU_DCEFCLK; break;
	case PP_VCLK:
		clk_type = SMU_VCLK; break;
	case PP_DCLK:
		clk_type = SMU_DCLK; break;
	case OD_SCLK:
		clk_type = SMU_OD_SCLK; break;
	case OD_MCLK:
		clk_type = SMU_OD_MCLK; break;
	case OD_VDDC_CURVE:
		clk_type = SMU_OD_VDDC_CURVE; break;
	case OD_RANGE:
		clk_type = SMU_OD_RANGE; break;
	case OD_VDDGFX_OFFSET:
		clk_type = SMU_OD_VDDGFX_OFFSET; break;
	case OD_CCLK:
		clk_type = SMU_OD_CCLK; break;
	default:
		return -EINVAL;
	}

	return smu_print_smuclk_levels(smu, clk_type, buf);
}

int smu_od_edit_dpm_table(void *handle,
			  enum PP_OD_DPM_TABLE_COMMAND type,
			  long *input, uint32_t size)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->od_edit_dpm_table)
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_read_sensor(void *handle, int sensor, void *data, int *size_arg)
{
	struct smu_context *smu = handle;
	struct smu_umd_pstate_table *pstate_table =
		&smu->pstate_table;
	int ret = 0;
	uint32_t *size, size_val;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!data || !size_arg)
		return -EINVAL;

	size_val = *size_arg;
	size = &size_val;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->read_sensor)
		if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
			goto unlock;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		*size = 0;
		ret = -EOPNOTSUPP;
		break;
	}

unlock:
	mutex_unlock(&smu->mutex);

	/* assign uint32_t to int */
	*size_arg = size_val;

	return ret;
}

int smu_get_power_profile_mode(void *handle, char *buf)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_power_profile_mode)
		ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_power_profile_mode(void *handle, long *param, uint32_t param_size)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	smu_bump_power_profile_mode(smu, param, param_size);

	mutex_unlock(&smu->mutex);

	return ret;
}

u32 smu_get_fan_control_mode(void *handle)
{
	struct smu_context *smu = handle;
	u32 ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return AMD_FAN_CTRL_NONE;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_control_mode)
		ret = smu->ppt_funcs->get_fan_control_mode(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_control_mode(struct smu_context *smu, int value)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_control_mode) {
		ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
		if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
			smu->user_dpm_profile.fan_mode = value;
	}

	mutex_unlock(&smu->mutex);

	/* reset user dpm fan speed */
	if (!ret && value != AMD_FAN_CTRL_MANUAL &&
	    smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
		smu->user_dpm_profile.fan_speed_percent = 0;

	return ret;
}

void smu_pp_set_fan_control_mode(void *handle, u32 value)
{
	struct smu_context *smu = handle;

	smu_set_fan_control_mode(smu, value);
}

int smu_get_fan_speed_percent(void *handle, u32 *speed)
{
	struct smu_context *smu = handle;
	int ret = 0;
	uint32_t percent;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_percent) {
		ret = smu->ppt_funcs->get_fan_speed_percent(smu, &percent);
		if (!ret)
			*speed = percent > 100 ? 100 : percent;
	}

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_speed_percent(void *handle, u32 speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_speed_percent) {
		if (speed > 100)
			speed = 100;
		ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
		if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
			smu->user_dpm_profile.fan_speed_percent = speed;
	}

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
{
	struct smu_context *smu = handle;
	int ret = 0;
	u32 percent;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_percent) {
		ret = smu->ppt_funcs->get_fan_speed_percent(smu, &percent);
		*speed = percent * smu->fan_max_rpm / 100;
	}

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_min_dcef_deep_sleep(smu, clk);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type_with_latency(struct smu_context *smu,
				       enum smu_clk_type clk_type,
				       struct pp_clock_levels_with_latency *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_latency)
		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_display_clock_voltage_request(struct smu_context *smu,
				      struct pp_display_clock_request *clock_req)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_clock_voltage_request)
		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
{
	int ret = -EINVAL;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_xgmi_pstate(void *handle,
			uint32_t pstate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_xgmi_pstate)
		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");

	return ret;
}

int smu_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_azalia_d3_pme)
		ret = smu->ppt_funcs->set_azalia_d3_pme(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

/*
 * On system suspending or resetting, the dpm_enabled
 * flag will be cleared. So that those SMU services which
 * are not supported will be gated.
 *
 * However, the baco/mode1 reset should still be granted
 * as they are still supported and necessary.
 */
bool smu_baco_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
		ret = smu->ppt_funcs->baco_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_baco_capability(void *handle, bool *cap)
{
	struct smu_context *smu = handle;
	int ret = 0;

	*cap = false;

	if (!smu->pm_enabled)
		return 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
		*cap = smu->ppt_funcs->baco_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
{
	if (!smu->ppt_funcs->baco_get_state)
		return -EINVAL;

	mutex_lock(&smu->mutex);
	*state = smu->ppt_funcs->baco_get_state(smu);
	mutex_unlock(&smu->mutex);

	return 0;
}

int smu_baco_enter(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_enter)
		ret = smu->ppt_funcs->baco_enter(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to enter BACO state!\n");

	return ret;
}

int smu_baco_exit(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_exit)
		ret = smu->ppt_funcs->baco_exit(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to exit BACO state!\n");

	return ret;
}

int smu_baco_set_state(void *handle, int state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (state == 0) {
		mutex_lock(&smu->mutex);

		if (smu->ppt_funcs->baco_exit)
			ret = smu->ppt_funcs->baco_exit(smu);

		mutex_unlock(&smu->mutex);
	} else if (state == 1) {
		mutex_lock(&smu->mutex);

		if (smu->ppt_funcs->baco_enter)
			ret = smu->ppt_funcs->baco_enter(smu);

		mutex_unlock(&smu->mutex);
	} else {
		return -EINVAL;
	}

	if (ret)
		dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
			(state) ? "enter" : "exit");

	return ret;
}

bool smu_mode1_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
		ret = smu->ppt_funcs->mode1_reset_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->mode1_reset)
		ret = smu->ppt_funcs->mode1_reset(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_mode2_reset(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->mode2_reset)
		ret = smu->ppt_funcs->mode2_reset(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Mode2 reset failed!\n");

	return ret;
}

int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					 struct pp_smu_nv_clock_table *max_clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_uclk_dpm_states(struct smu_context *smu,
			    unsigned int *clock_values_in_khz,
			    unsigned int *num_states)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	mutex_unlock(&smu->mutex);

	return ret;
}

enum amd_pm_state_type smu_get_current_power_state(void *handle)
{
	struct smu_context *smu = handle;
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);

	mutex_unlock(&smu->mutex);

	return pm_state;
}

int smu_get_dpm_clock_table(struct smu_context *smu,
			    struct dpm_clocks *clock_table)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	mutex_unlock(&smu->mutex);

	return ret;
}

ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
{
	struct smu_context *smu = handle;
	ssize_t size;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_gpu_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	size = smu->ppt_funcs->get_gpu_metrics(smu, table);

	mutex_unlock(&smu->mutex);

	return size;
}

int smu_enable_mgpu_fan_boost(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->enable_mgpu_fan_boost)
		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_gfx_state_change_set(struct smu_context *smu, uint32_t state)
{
	int ret = 0;

	mutex_lock(&smu->mutex);
	if (smu->ppt_funcs->gfx_state_change_set)
		ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
	mutex_unlock(&smu->mutex);

	return ret;
}

static const struct amd_pm_funcs swsmu_pm_funcs = {
	/* export for sysfs */
	.set_fan_control_mode = smu_pp_set_fan_control_mode,
	.get_fan_control_mode = smu_get_fan_control_mode,
	.set_fan_speed_percent = smu_set_fan_speed_percent,
	.get_fan_speed_percent = smu_get_fan_speed_percent,
	.force_performance_level = smu_force_performance_level,
	.read_sensor = smu_read_sensor,
	.get_performance_level = smu_get_performance_level,
	.get_current_power_state = smu_get_current_power_state,
	.get_fan_speed_rpm = smu_get_fan_speed_rpm,
	.set_fan_speed_rpm = smu_set_fan_speed_rpm,
	.get_pp_num_states = smu_get_power_num_states,
	.get_pp_table = smu_sys_get_pp_table,
	.set_pp_table = smu_sys_set_pp_table,
	.switch_power_profile = smu_switch_power_profile,
	/* export to amdgpu */
	.dispatch_tasks = smu_handle_dpm_task,
	.set_powergating_by_smu = smu_dpm_set_power_gate,
	.set_power_limit = smu_set_power_limit,
	.odn_edit_dpm_table = smu_od_edit_dpm_table,
	.set_mp1_state = smu_set_mp1_state,
	/* export to DC */
	.get_sclk = smu_get_sclk,
	.get_mclk = smu_get_mclk,
	.enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost,
	.get_asic_baco_capability = smu_get_baco_capability,
	.set_asic_baco_state = smu_baco_set_state,
	.get_ppfeature_status = smu_sys_get_pp_feature_mask,
	.set_ppfeature_status = smu_sys_set_pp_feature_mask,
	.asic_reset_mode_2 = smu_mode2_reset,
	.set_df_cstate = smu_set_df_cstate,
	.set_xgmi_pstate = smu_set_xgmi_pstate,
	.get_gpu_metrics = smu_sys_get_gpu_metrics,
	.set_power_profile_mode = smu_set_power_profile_mode,
	.get_power_profile_mode = smu_get_power_profile_mode,
	.force_clock_level = smu_force_ppclk_levels,
	.print_clock_levels = smu_print_ppclk_levels,
};