1 /* 2 * Copyright 2011 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Alex Deucher 23 */ 24 25 #include "amdgpu.h" 26 #include "amdgpu_atombios.h" 27 #include "amdgpu_i2c.h" 28 #include "amdgpu_dpm.h" 29 #include "atom.h" 30 #include "amd_pcie.h" 31 #include "amdgpu_display.h" 32 #include "hwmgr.h" 33 #include <linux/power_supply.h> 34 #include "amdgpu_smu.h" 35 36 #define amdgpu_dpm_enable_bapm(adev, e) \ 37 ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) 38 39 #define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev)) 40 41 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) 42 { 43 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 44 int ret = 0; 45 46 if (!pp_funcs->get_sclk) 47 return 0; 48 49 mutex_lock(&adev->pm.mutex); 50 ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle, 51 low); 52 mutex_unlock(&adev->pm.mutex); 53 54 return ret; 55 } 56 57 int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low) 58 { 59 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 60 int ret = 0; 61 62 if (!pp_funcs->get_mclk) 63 return 0; 64 65 mutex_lock(&adev->pm.mutex); 66 ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle, 67 low); 68 mutex_unlock(&adev->pm.mutex); 69 70 return ret; 71 } 72 73 int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, 74 uint32_t block_type, 75 bool gate, 76 int inst) 77 { 78 int ret = 0; 79 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 80 enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON; 81 bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN; 82 83 if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state && 84 (!is_vcn || adev->vcn.num_vcn_inst == 1)) { 85 dev_dbg(adev->dev, "IP block%d already in the target %s state!", 86 block_type, gate ? "gate" : "ungate"); 87 return 0; 88 } 89 90 mutex_lock(&adev->pm.mutex); 91 92 switch (block_type) { 93 case AMD_IP_BLOCK_TYPE_UVD: 94 case AMD_IP_BLOCK_TYPE_VCE: 95 case AMD_IP_BLOCK_TYPE_GFX: 96 case AMD_IP_BLOCK_TYPE_SDMA: 97 case AMD_IP_BLOCK_TYPE_JPEG: 98 case AMD_IP_BLOCK_TYPE_GMC: 99 case AMD_IP_BLOCK_TYPE_ACP: 100 case AMD_IP_BLOCK_TYPE_VPE: 101 if (pp_funcs && pp_funcs->set_powergating_by_smu) 102 ret = (pp_funcs->set_powergating_by_smu( 103 (adev)->powerplay.pp_handle, block_type, gate, 0)); 104 break; 105 case AMD_IP_BLOCK_TYPE_VCN: 106 if (pp_funcs && pp_funcs->set_powergating_by_smu) 107 ret = (pp_funcs->set_powergating_by_smu( 108 (adev)->powerplay.pp_handle, block_type, gate, inst)); 109 break; 110 default: 111 break; 112 } 113 114 if (!ret) 115 atomic_set(&adev->pm.pwr_state[block_type], pwr_state); 116 117 mutex_unlock(&adev->pm.mutex); 118 119 return ret; 120 } 121 122 int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev) 123 { 124 struct smu_context *smu = adev->powerplay.pp_handle; 125 int ret = -EOPNOTSUPP; 126 127 mutex_lock(&adev->pm.mutex); 128 ret = smu_set_gfx_power_up_by_imu(smu); 129 mutex_unlock(&adev->pm.mutex); 130 131 msleep(10); 132 133 return ret; 134 } 135 136 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev) 137 { 138 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 139 void *pp_handle = adev->powerplay.pp_handle; 140 int ret = 0; 141 142 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 143 return -ENOENT; 144 145 mutex_lock(&adev->pm.mutex); 146 147 /* enter BACO state */ 148 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 149 150 mutex_unlock(&adev->pm.mutex); 151 152 return ret; 153 } 154 155 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev) 156 { 157 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 158 void *pp_handle = adev->powerplay.pp_handle; 159 int ret = 0; 160 161 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 162 return -ENOENT; 163 164 mutex_lock(&adev->pm.mutex); 165 166 /* exit BACO state */ 167 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 168 169 mutex_unlock(&adev->pm.mutex); 170 171 return ret; 172 } 173 174 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, 175 enum pp_mp1_state mp1_state) 176 { 177 int ret = 0; 178 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 179 180 if (mp1_state == PP_MP1_STATE_FLR) { 181 /* VF lost access to SMU */ 182 if (amdgpu_sriov_vf(adev)) 183 adev->pm.dpm_enabled = false; 184 } else if (pp_funcs && pp_funcs->set_mp1_state) { 185 mutex_lock(&adev->pm.mutex); 186 187 ret = pp_funcs->set_mp1_state( 188 adev->powerplay.pp_handle, 189 mp1_state); 190 191 mutex_unlock(&adev->pm.mutex); 192 } 193 194 return ret; 195 } 196 197 int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en) 198 { 199 int ret = 0; 200 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 201 202 if (pp_funcs && pp_funcs->notify_rlc_state) { 203 mutex_lock(&adev->pm.mutex); 204 205 ret = pp_funcs->notify_rlc_state( 206 adev->powerplay.pp_handle, 207 en); 208 209 mutex_unlock(&adev->pm.mutex); 210 } 211 212 return ret; 213 } 214 215 int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) 216 { 217 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 218 void *pp_handle = adev->powerplay.pp_handle; 219 int ret; 220 221 if (!pp_funcs || !pp_funcs->get_asic_baco_capability) 222 return 0; 223 /* Don't use baco for reset in S3. 224 * This is a workaround for some platforms 225 * where entering BACO during suspend 226 * seems to cause reboots or hangs. 227 * This might be related to the fact that BACO controls 228 * power to the whole GPU including devices like audio and USB. 229 * Powering down/up everything may adversely affect these other 230 * devices. Needs more investigation. 231 */ 232 if (adev->in_s3) 233 return 0; 234 235 mutex_lock(&adev->pm.mutex); 236 237 ret = pp_funcs->get_asic_baco_capability(pp_handle); 238 239 mutex_unlock(&adev->pm.mutex); 240 241 return ret; 242 } 243 244 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev) 245 { 246 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 247 void *pp_handle = adev->powerplay.pp_handle; 248 int ret = 0; 249 250 if (!pp_funcs || !pp_funcs->asic_reset_mode_2) 251 return -ENOENT; 252 253 mutex_lock(&adev->pm.mutex); 254 255 ret = pp_funcs->asic_reset_mode_2(pp_handle); 256 257 mutex_unlock(&adev->pm.mutex); 258 259 return ret; 260 } 261 262 int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev) 263 { 264 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 265 void *pp_handle = adev->powerplay.pp_handle; 266 int ret = 0; 267 268 if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features) 269 return -ENOENT; 270 271 mutex_lock(&adev->pm.mutex); 272 273 ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle); 274 275 mutex_unlock(&adev->pm.mutex); 276 277 return ret; 278 } 279 280 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev) 281 { 282 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 283 void *pp_handle = adev->powerplay.pp_handle; 284 int ret = 0; 285 286 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 287 return -ENOENT; 288 289 mutex_lock(&adev->pm.mutex); 290 291 /* enter BACO state */ 292 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 293 if (ret) 294 goto out; 295 296 /* exit BACO state */ 297 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 298 299 out: 300 mutex_unlock(&adev->pm.mutex); 301 return ret; 302 } 303 304 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev) 305 { 306 struct smu_context *smu = adev->powerplay.pp_handle; 307 bool support_mode1_reset = false; 308 309 if (is_support_sw_smu(adev)) { 310 mutex_lock(&adev->pm.mutex); 311 support_mode1_reset = smu_mode1_reset_is_support(smu); 312 mutex_unlock(&adev->pm.mutex); 313 } 314 315 return support_mode1_reset; 316 } 317 318 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev) 319 { 320 struct smu_context *smu = adev->powerplay.pp_handle; 321 int ret = -EOPNOTSUPP; 322 323 if (is_support_sw_smu(adev)) { 324 mutex_lock(&adev->pm.mutex); 325 ret = smu_mode1_reset(smu); 326 mutex_unlock(&adev->pm.mutex); 327 } 328 329 return ret; 330 } 331 332 bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev) 333 { 334 struct smu_context *smu = adev->powerplay.pp_handle; 335 bool support_link_reset = false; 336 337 if (is_support_sw_smu(adev)) { 338 mutex_lock(&adev->pm.mutex); 339 support_link_reset = smu_link_reset_is_support(smu); 340 mutex_unlock(&adev->pm.mutex); 341 } 342 343 return support_link_reset; 344 } 345 346 int amdgpu_dpm_link_reset(struct amdgpu_device *adev) 347 { 348 struct smu_context *smu = adev->powerplay.pp_handle; 349 int ret = -EOPNOTSUPP; 350 351 if (is_support_sw_smu(adev)) { 352 mutex_lock(&adev->pm.mutex); 353 ret = smu_link_reset(smu); 354 mutex_unlock(&adev->pm.mutex); 355 } 356 357 return ret; 358 } 359 360 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, 361 enum PP_SMC_POWER_PROFILE type, 362 bool en) 363 { 364 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 365 int ret = 0; 366 367 if (amdgpu_sriov_vf(adev)) 368 return 0; 369 370 if (pp_funcs && pp_funcs->switch_power_profile) { 371 mutex_lock(&adev->pm.mutex); 372 ret = pp_funcs->switch_power_profile( 373 adev->powerplay.pp_handle, type, en); 374 mutex_unlock(&adev->pm.mutex); 375 } 376 377 return ret; 378 } 379 380 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, 381 uint32_t pstate) 382 { 383 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 384 int ret = 0; 385 386 if (pp_funcs && pp_funcs->set_xgmi_pstate) { 387 mutex_lock(&adev->pm.mutex); 388 ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle, 389 pstate); 390 mutex_unlock(&adev->pm.mutex); 391 } 392 393 return ret; 394 } 395 396 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev, 397 uint32_t cstate) 398 { 399 int ret = 0; 400 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 401 void *pp_handle = adev->powerplay.pp_handle; 402 403 if (pp_funcs && pp_funcs->set_df_cstate) { 404 mutex_lock(&adev->pm.mutex); 405 ret = pp_funcs->set_df_cstate(pp_handle, cstate); 406 mutex_unlock(&adev->pm.mutex); 407 } 408 409 return ret; 410 } 411 412 ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev, 413 enum pp_pm_policy p_type, char *buf) 414 { 415 struct smu_context *smu = adev->powerplay.pp_handle; 416 int ret = -EOPNOTSUPP; 417 418 if (is_support_sw_smu(adev)) { 419 mutex_lock(&adev->pm.mutex); 420 ret = smu_get_pm_policy_info(smu, p_type, buf); 421 mutex_unlock(&adev->pm.mutex); 422 } 423 424 return ret; 425 } 426 427 int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type, 428 int policy_level) 429 { 430 struct smu_context *smu = adev->powerplay.pp_handle; 431 int ret = -EOPNOTSUPP; 432 433 if (is_support_sw_smu(adev)) { 434 mutex_lock(&adev->pm.mutex); 435 ret = smu_set_pm_policy(smu, policy_type, policy_level); 436 mutex_unlock(&adev->pm.mutex); 437 } 438 439 return ret; 440 } 441 442 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev) 443 { 444 void *pp_handle = adev->powerplay.pp_handle; 445 const struct amd_pm_funcs *pp_funcs = 446 adev->powerplay.pp_funcs; 447 int ret = 0; 448 449 if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) { 450 mutex_lock(&adev->pm.mutex); 451 ret = pp_funcs->enable_mgpu_fan_boost(pp_handle); 452 mutex_unlock(&adev->pm.mutex); 453 } 454 455 return ret; 456 } 457 458 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev, 459 uint32_t msg_id) 460 { 461 void *pp_handle = adev->powerplay.pp_handle; 462 const struct amd_pm_funcs *pp_funcs = 463 adev->powerplay.pp_funcs; 464 int ret = 0; 465 466 if (pp_funcs && pp_funcs->set_clockgating_by_smu) { 467 mutex_lock(&adev->pm.mutex); 468 ret = pp_funcs->set_clockgating_by_smu(pp_handle, 469 msg_id); 470 mutex_unlock(&adev->pm.mutex); 471 } 472 473 return ret; 474 } 475 476 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev, 477 bool acquire) 478 { 479 void *pp_handle = adev->powerplay.pp_handle; 480 const struct amd_pm_funcs *pp_funcs = 481 adev->powerplay.pp_funcs; 482 int ret = -EOPNOTSUPP; 483 484 if (pp_funcs && pp_funcs->smu_i2c_bus_access) { 485 mutex_lock(&adev->pm.mutex); 486 ret = pp_funcs->smu_i2c_bus_access(pp_handle, 487 acquire); 488 mutex_unlock(&adev->pm.mutex); 489 } 490 491 return ret; 492 } 493 494 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 495 { 496 if (adev->pm.dpm_enabled) { 497 mutex_lock(&adev->pm.mutex); 498 if (power_supply_is_system_supplied() > 0) 499 adev->pm.ac_power = true; 500 else 501 adev->pm.ac_power = false; 502 503 if (adev->powerplay.pp_funcs && 504 adev->powerplay.pp_funcs->enable_bapm) 505 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); 506 507 if (is_support_sw_smu(adev)) 508 smu_set_ac_dc(adev->powerplay.pp_handle); 509 510 mutex_unlock(&adev->pm.mutex); 511 } 512 } 513 514 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, 515 void *data, uint32_t *size) 516 { 517 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 518 int ret = -EINVAL; 519 520 if (!data || !size) 521 return -EINVAL; 522 523 if (pp_funcs && pp_funcs->read_sensor) { 524 mutex_lock(&adev->pm.mutex); 525 ret = pp_funcs->read_sensor(adev->powerplay.pp_handle, 526 sensor, 527 data, 528 size); 529 mutex_unlock(&adev->pm.mutex); 530 } 531 532 return ret; 533 } 534 535 int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit) 536 { 537 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 538 int ret = -EOPNOTSUPP; 539 540 if (pp_funcs && pp_funcs->get_apu_thermal_limit) { 541 mutex_lock(&adev->pm.mutex); 542 ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit); 543 mutex_unlock(&adev->pm.mutex); 544 } 545 546 return ret; 547 } 548 549 int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit) 550 { 551 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 552 int ret = -EOPNOTSUPP; 553 554 if (pp_funcs && pp_funcs->set_apu_thermal_limit) { 555 mutex_lock(&adev->pm.mutex); 556 ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit); 557 mutex_unlock(&adev->pm.mutex); 558 } 559 560 return ret; 561 } 562 563 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev) 564 { 565 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 566 int i; 567 568 if (!adev->pm.dpm_enabled) 569 return; 570 571 if (!pp_funcs->pm_compute_clocks) 572 return; 573 574 if (adev->mode_info.num_crtc) 575 amdgpu_display_bandwidth_update(adev); 576 577 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 578 struct amdgpu_ring *ring = adev->rings[i]; 579 if (ring && ring->sched.ready) 580 amdgpu_fence_wait_empty(ring); 581 } 582 583 mutex_lock(&adev->pm.mutex); 584 pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle); 585 mutex_unlock(&adev->pm.mutex); 586 } 587 588 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 589 { 590 int ret = 0; 591 592 if (adev->family == AMDGPU_FAMILY_SI) { 593 mutex_lock(&adev->pm.mutex); 594 if (enable) { 595 adev->pm.dpm.uvd_active = true; 596 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; 597 } else { 598 adev->pm.dpm.uvd_active = false; 599 } 600 mutex_unlock(&adev->pm.mutex); 601 602 amdgpu_dpm_compute_clocks(adev); 603 return; 604 } 605 606 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable, 0); 607 if (ret) 608 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 609 enable ? "enable" : "disable", ret); 610 } 611 612 void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst) 613 { 614 int ret = 0; 615 616 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, !enable, inst); 617 if (ret) 618 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 619 enable ? "enable" : "disable", ret); 620 } 621 622 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 623 { 624 int ret = 0; 625 626 if (adev->family == AMDGPU_FAMILY_SI) { 627 mutex_lock(&adev->pm.mutex); 628 if (enable) { 629 adev->pm.dpm.vce_active = true; 630 /* XXX select vce level based on ring/task */ 631 adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; 632 } else { 633 adev->pm.dpm.vce_active = false; 634 } 635 mutex_unlock(&adev->pm.mutex); 636 637 amdgpu_dpm_compute_clocks(adev); 638 return; 639 } 640 641 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable, 0); 642 if (ret) 643 DRM_ERROR("Dpm %s vce failed, ret = %d. \n", 644 enable ? "enable" : "disable", ret); 645 } 646 647 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) 648 { 649 int ret = 0; 650 651 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable, 0); 652 if (ret) 653 DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", 654 enable ? "enable" : "disable", ret); 655 } 656 657 void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable) 658 { 659 int ret = 0; 660 661 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable, 0); 662 if (ret) 663 DRM_ERROR("Dpm %s vpe failed, ret = %d.\n", 664 enable ? "enable" : "disable", ret); 665 } 666 667 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) 668 { 669 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 670 int r = 0; 671 672 if (!pp_funcs || !pp_funcs->load_firmware || 673 (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU))) 674 return 0; 675 676 mutex_lock(&adev->pm.mutex); 677 r = pp_funcs->load_firmware(adev->powerplay.pp_handle); 678 if (r) { 679 pr_err("smu firmware loading failed\n"); 680 goto out; 681 } 682 683 if (smu_version) 684 *smu_version = adev->pm.fw_version; 685 686 out: 687 mutex_unlock(&adev->pm.mutex); 688 return r; 689 } 690 691 int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable) 692 { 693 int ret = 0; 694 695 if (is_support_sw_smu(adev)) { 696 mutex_lock(&adev->pm.mutex); 697 ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle, 698 enable); 699 mutex_unlock(&adev->pm.mutex); 700 } 701 702 return ret; 703 } 704 705 int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size) 706 { 707 struct smu_context *smu = adev->powerplay.pp_handle; 708 int ret = 0; 709 710 if (!is_support_sw_smu(adev)) 711 return -EOPNOTSUPP; 712 713 mutex_lock(&adev->pm.mutex); 714 ret = smu_send_hbm_bad_pages_num(smu, size); 715 mutex_unlock(&adev->pm.mutex); 716 717 return ret; 718 } 719 720 int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size) 721 { 722 struct smu_context *smu = adev->powerplay.pp_handle; 723 int ret = 0; 724 725 if (!is_support_sw_smu(adev)) 726 return -EOPNOTSUPP; 727 728 mutex_lock(&adev->pm.mutex); 729 ret = smu_send_hbm_bad_channel_flag(smu, size); 730 mutex_unlock(&adev->pm.mutex); 731 732 return ret; 733 } 734 735 int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev) 736 { 737 struct smu_context *smu = adev->powerplay.pp_handle; 738 int ret; 739 740 if (!is_support_sw_smu(adev)) 741 return -EOPNOTSUPP; 742 743 mutex_lock(&adev->pm.mutex); 744 ret = smu_send_rma_reason(smu); 745 mutex_unlock(&adev->pm.mutex); 746 747 if (adev->cper.enabled) 748 if (amdgpu_cper_generate_bp_threshold_record(adev)) 749 dev_warn(adev->dev, "fail to generate bad page threshold cper records\n"); 750 751 return ret; 752 } 753 754 /** 755 * amdgpu_dpm_reset_sdma_is_supported - Check if SDMA reset is supported 756 * @adev: amdgpu_device pointer 757 * 758 * This function checks if the SMU supports resetting the SDMA engine. 759 * It returns false if the hardware does not support software SMU or 760 * if the feature is not supported. 761 */ 762 bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev) 763 { 764 struct smu_context *smu = adev->powerplay.pp_handle; 765 bool ret; 766 767 if (!is_support_sw_smu(adev)) 768 return false; 769 770 mutex_lock(&adev->pm.mutex); 771 ret = smu_reset_sdma_is_supported(smu); 772 mutex_unlock(&adev->pm.mutex); 773 774 return ret; 775 } 776 777 int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask) 778 { 779 struct smu_context *smu = adev->powerplay.pp_handle; 780 int ret; 781 782 if (!is_support_sw_smu(adev)) 783 return -EOPNOTSUPP; 784 785 mutex_lock(&adev->pm.mutex); 786 ret = smu_reset_sdma(smu, inst_mask); 787 mutex_unlock(&adev->pm.mutex); 788 789 return ret; 790 } 791 792 int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, 793 enum pp_clock_type type, 794 uint32_t *min, 795 uint32_t *max) 796 { 797 int ret = 0; 798 799 if (type != PP_SCLK) 800 return -EINVAL; 801 802 if (!is_support_sw_smu(adev)) 803 return -EOPNOTSUPP; 804 805 mutex_lock(&adev->pm.mutex); 806 ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle, 807 SMU_SCLK, 808 min, 809 max); 810 mutex_unlock(&adev->pm.mutex); 811 812 return ret; 813 } 814 815 int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, 816 enum pp_clock_type type, 817 uint32_t min, 818 uint32_t max) 819 { 820 struct smu_context *smu = adev->powerplay.pp_handle; 821 int ret = 0; 822 823 if (type != PP_SCLK) 824 return -EINVAL; 825 826 if (!is_support_sw_smu(adev)) 827 return -EOPNOTSUPP; 828 829 mutex_lock(&adev->pm.mutex); 830 ret = smu_set_soft_freq_range(smu, 831 SMU_SCLK, 832 min, 833 max); 834 mutex_unlock(&adev->pm.mutex); 835 836 return ret; 837 } 838 839 int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev) 840 { 841 struct smu_context *smu = adev->powerplay.pp_handle; 842 int ret = 0; 843 844 if (!is_support_sw_smu(adev)) 845 return 0; 846 847 mutex_lock(&adev->pm.mutex); 848 ret = smu_write_watermarks_table(smu); 849 mutex_unlock(&adev->pm.mutex); 850 851 return ret; 852 } 853 854 int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, 855 enum smu_event_type event, 856 uint64_t event_arg) 857 { 858 struct smu_context *smu = adev->powerplay.pp_handle; 859 int ret = 0; 860 861 if (!is_support_sw_smu(adev)) 862 return -EOPNOTSUPP; 863 864 mutex_lock(&adev->pm.mutex); 865 ret = smu_wait_for_event(smu, event, event_arg); 866 mutex_unlock(&adev->pm.mutex); 867 868 return ret; 869 } 870 871 int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value) 872 { 873 struct smu_context *smu = adev->powerplay.pp_handle; 874 int ret = 0; 875 876 if (!is_support_sw_smu(adev)) 877 return -EOPNOTSUPP; 878 879 mutex_lock(&adev->pm.mutex); 880 ret = smu_set_residency_gfxoff(smu, value); 881 mutex_unlock(&adev->pm.mutex); 882 883 return ret; 884 } 885 886 int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value) 887 { 888 struct smu_context *smu = adev->powerplay.pp_handle; 889 int ret = 0; 890 891 if (!is_support_sw_smu(adev)) 892 return -EOPNOTSUPP; 893 894 mutex_lock(&adev->pm.mutex); 895 ret = smu_get_residency_gfxoff(smu, value); 896 mutex_unlock(&adev->pm.mutex); 897 898 return ret; 899 } 900 901 int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value) 902 { 903 struct smu_context *smu = adev->powerplay.pp_handle; 904 int ret = 0; 905 906 if (!is_support_sw_smu(adev)) 907 return -EOPNOTSUPP; 908 909 mutex_lock(&adev->pm.mutex); 910 ret = smu_get_entrycount_gfxoff(smu, value); 911 mutex_unlock(&adev->pm.mutex); 912 913 return ret; 914 } 915 916 int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value) 917 { 918 struct smu_context *smu = adev->powerplay.pp_handle; 919 int ret = 0; 920 921 if (!is_support_sw_smu(adev)) 922 return -EOPNOTSUPP; 923 924 mutex_lock(&adev->pm.mutex); 925 ret = smu_get_status_gfxoff(smu, value); 926 mutex_unlock(&adev->pm.mutex); 927 928 return ret; 929 } 930 931 uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev) 932 { 933 struct smu_context *smu = adev->powerplay.pp_handle; 934 935 if (!is_support_sw_smu(adev)) 936 return 0; 937 938 return atomic64_read(&smu->throttle_int_counter); 939 } 940 941 /* amdgpu_dpm_gfx_state_change - Handle gfx power state change set 942 * @adev: amdgpu_device pointer 943 * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry) 944 * 945 */ 946 void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev, 947 enum gfx_change_state state) 948 { 949 mutex_lock(&adev->pm.mutex); 950 if (adev->powerplay.pp_funcs && 951 adev->powerplay.pp_funcs->gfx_state_change_set) 952 ((adev)->powerplay.pp_funcs->gfx_state_change_set( 953 (adev)->powerplay.pp_handle, state)); 954 mutex_unlock(&adev->pm.mutex); 955 } 956 957 int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, 958 void *umc_ecc) 959 { 960 struct smu_context *smu = adev->powerplay.pp_handle; 961 int ret = 0; 962 963 if (!is_support_sw_smu(adev)) 964 return -EOPNOTSUPP; 965 966 mutex_lock(&adev->pm.mutex); 967 ret = smu_get_ecc_info(smu, umc_ecc); 968 mutex_unlock(&adev->pm.mutex); 969 970 return ret; 971 } 972 973 struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev, 974 uint32_t idx) 975 { 976 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 977 struct amd_vce_state *vstate = NULL; 978 979 if (!pp_funcs->get_vce_clock_state) 980 return NULL; 981 982 mutex_lock(&adev->pm.mutex); 983 vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle, 984 idx); 985 mutex_unlock(&adev->pm.mutex); 986 987 return vstate; 988 } 989 990 void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, 991 enum amd_pm_state_type *state) 992 { 993 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 994 995 mutex_lock(&adev->pm.mutex); 996 997 if (!pp_funcs->get_current_power_state) { 998 *state = adev->pm.dpm.user_state; 999 goto out; 1000 } 1001 1002 *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle); 1003 if (*state < POWER_STATE_TYPE_DEFAULT || 1004 *state > POWER_STATE_TYPE_INTERNAL_3DPERF) 1005 *state = adev->pm.dpm.user_state; 1006 1007 out: 1008 mutex_unlock(&adev->pm.mutex); 1009 } 1010 1011 void amdgpu_dpm_set_power_state(struct amdgpu_device *adev, 1012 enum amd_pm_state_type state) 1013 { 1014 mutex_lock(&adev->pm.mutex); 1015 adev->pm.dpm.user_state = state; 1016 mutex_unlock(&adev->pm.mutex); 1017 1018 if (is_support_sw_smu(adev)) 1019 return; 1020 1021 if (amdgpu_dpm_dispatch_task(adev, 1022 AMD_PP_TASK_ENABLE_USER_STATE, 1023 &state) == -EOPNOTSUPP) 1024 amdgpu_dpm_compute_clocks(adev); 1025 } 1026 1027 enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev) 1028 { 1029 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1030 enum amd_dpm_forced_level level; 1031 1032 if (!pp_funcs) 1033 return AMD_DPM_FORCED_LEVEL_AUTO; 1034 1035 mutex_lock(&adev->pm.mutex); 1036 if (pp_funcs->get_performance_level) 1037 level = pp_funcs->get_performance_level(adev->powerplay.pp_handle); 1038 else 1039 level = adev->pm.dpm.forced_level; 1040 mutex_unlock(&adev->pm.mutex); 1041 1042 return level; 1043 } 1044 1045 static void amdgpu_dpm_enter_umd_state(struct amdgpu_device *adev) 1046 { 1047 /* enter UMD Pstate */ 1048 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX, 1049 AMD_PG_STATE_UNGATE); 1050 amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX, 1051 AMD_CG_STATE_UNGATE); 1052 } 1053 1054 static void amdgpu_dpm_exit_umd_state(struct amdgpu_device *adev) 1055 { 1056 /* exit UMD Pstate */ 1057 amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX, 1058 AMD_CG_STATE_GATE); 1059 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX, 1060 AMD_PG_STATE_GATE); 1061 } 1062 1063 int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev, 1064 enum amd_dpm_forced_level level) 1065 { 1066 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1067 enum amd_dpm_forced_level current_level; 1068 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 1069 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 1070 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 1071 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 1072 1073 if (!pp_funcs || !pp_funcs->force_performance_level) 1074 return 0; 1075 1076 if (adev->pm.dpm.thermal_active) 1077 return -EINVAL; 1078 1079 current_level = amdgpu_dpm_get_performance_level(adev); 1080 if (current_level == level) 1081 return 0; 1082 1083 if (!(current_level & profile_mode_mask) && 1084 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) 1085 return -EINVAL; 1086 1087 if (adev->asic_type == CHIP_RAVEN) { 1088 if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) { 1089 if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && 1090 level == AMD_DPM_FORCED_LEVEL_MANUAL) 1091 amdgpu_gfx_off_ctrl(adev, false); 1092 else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && 1093 level != AMD_DPM_FORCED_LEVEL_MANUAL) 1094 amdgpu_gfx_off_ctrl(adev, true); 1095 } 1096 } 1097 1098 if (!(current_level & profile_mode_mask) && (level & profile_mode_mask)) 1099 amdgpu_dpm_enter_umd_state(adev); 1100 else if ((current_level & profile_mode_mask) && 1101 !(level & profile_mode_mask)) 1102 amdgpu_dpm_exit_umd_state(adev); 1103 1104 mutex_lock(&adev->pm.mutex); 1105 1106 if (pp_funcs->force_performance_level(adev->powerplay.pp_handle, 1107 level)) { 1108 mutex_unlock(&adev->pm.mutex); 1109 /* If new level failed, retain the umd state as before */ 1110 if (!(current_level & profile_mode_mask) && 1111 (level & profile_mode_mask)) 1112 amdgpu_dpm_exit_umd_state(adev); 1113 else if ((current_level & profile_mode_mask) && 1114 !(level & profile_mode_mask)) 1115 amdgpu_dpm_enter_umd_state(adev); 1116 1117 return -EINVAL; 1118 } 1119 1120 adev->pm.dpm.forced_level = level; 1121 1122 mutex_unlock(&adev->pm.mutex); 1123 1124 return 0; 1125 } 1126 1127 int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev, 1128 struct pp_states_info *states) 1129 { 1130 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1131 int ret = 0; 1132 1133 if (!pp_funcs->get_pp_num_states) 1134 return -EOPNOTSUPP; 1135 1136 mutex_lock(&adev->pm.mutex); 1137 ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle, 1138 states); 1139 mutex_unlock(&adev->pm.mutex); 1140 1141 return ret; 1142 } 1143 1144 int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev, 1145 enum amd_pp_task task_id, 1146 enum amd_pm_state_type *user_state) 1147 { 1148 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1149 int ret = 0; 1150 1151 if (!pp_funcs->dispatch_tasks) 1152 return -EOPNOTSUPP; 1153 1154 mutex_lock(&adev->pm.mutex); 1155 ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle, 1156 task_id, 1157 user_state); 1158 mutex_unlock(&adev->pm.mutex); 1159 1160 return ret; 1161 } 1162 1163 int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table) 1164 { 1165 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1166 int ret = 0; 1167 1168 if (!pp_funcs->get_pp_table) 1169 return 0; 1170 1171 mutex_lock(&adev->pm.mutex); 1172 ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle, 1173 table); 1174 mutex_unlock(&adev->pm.mutex); 1175 1176 return ret; 1177 } 1178 1179 int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev, 1180 uint32_t type, 1181 long *input, 1182 uint32_t size) 1183 { 1184 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1185 int ret = 0; 1186 1187 if (!pp_funcs->set_fine_grain_clk_vol) 1188 return 0; 1189 1190 mutex_lock(&adev->pm.mutex); 1191 ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle, 1192 type, 1193 input, 1194 size); 1195 mutex_unlock(&adev->pm.mutex); 1196 1197 return ret; 1198 } 1199 1200 int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev, 1201 uint32_t type, 1202 long *input, 1203 uint32_t size) 1204 { 1205 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1206 int ret = 0; 1207 1208 if (!pp_funcs->odn_edit_dpm_table) 1209 return 0; 1210 1211 mutex_lock(&adev->pm.mutex); 1212 ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle, 1213 type, 1214 input, 1215 size); 1216 mutex_unlock(&adev->pm.mutex); 1217 1218 return ret; 1219 } 1220 1221 int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev, 1222 enum pp_clock_type type, 1223 char *buf) 1224 { 1225 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1226 int ret = 0; 1227 1228 if (!pp_funcs->print_clock_levels) 1229 return 0; 1230 1231 mutex_lock(&adev->pm.mutex); 1232 ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle, 1233 type, 1234 buf); 1235 mutex_unlock(&adev->pm.mutex); 1236 1237 return ret; 1238 } 1239 1240 int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev, 1241 enum pp_clock_type type, 1242 char *buf, 1243 int *offset) 1244 { 1245 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1246 int ret = 0; 1247 1248 if (!pp_funcs->emit_clock_levels) 1249 return -ENOENT; 1250 1251 mutex_lock(&adev->pm.mutex); 1252 ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle, 1253 type, 1254 buf, 1255 offset); 1256 mutex_unlock(&adev->pm.mutex); 1257 1258 return ret; 1259 } 1260 1261 int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev, 1262 uint64_t ppfeature_masks) 1263 { 1264 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1265 int ret = 0; 1266 1267 if (!pp_funcs->set_ppfeature_status) 1268 return 0; 1269 1270 mutex_lock(&adev->pm.mutex); 1271 ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle, 1272 ppfeature_masks); 1273 mutex_unlock(&adev->pm.mutex); 1274 1275 return ret; 1276 } 1277 1278 int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf) 1279 { 1280 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1281 int ret = 0; 1282 1283 if (!pp_funcs->get_ppfeature_status) 1284 return 0; 1285 1286 mutex_lock(&adev->pm.mutex); 1287 ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle, 1288 buf); 1289 mutex_unlock(&adev->pm.mutex); 1290 1291 return ret; 1292 } 1293 1294 int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev, 1295 enum pp_clock_type type, 1296 uint32_t mask) 1297 { 1298 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1299 int ret = 0; 1300 1301 if (!pp_funcs->force_clock_level) 1302 return 0; 1303 1304 mutex_lock(&adev->pm.mutex); 1305 ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle, 1306 type, 1307 mask); 1308 mutex_unlock(&adev->pm.mutex); 1309 1310 return ret; 1311 } 1312 1313 int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev) 1314 { 1315 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1316 int ret = 0; 1317 1318 if (!pp_funcs->get_sclk_od) 1319 return -EOPNOTSUPP; 1320 1321 mutex_lock(&adev->pm.mutex); 1322 ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle); 1323 mutex_unlock(&adev->pm.mutex); 1324 1325 return ret; 1326 } 1327 1328 int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) 1329 { 1330 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1331 1332 if (is_support_sw_smu(adev)) 1333 return -EOPNOTSUPP; 1334 1335 mutex_lock(&adev->pm.mutex); 1336 if (pp_funcs->set_sclk_od) 1337 pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value); 1338 mutex_unlock(&adev->pm.mutex); 1339 1340 if (amdgpu_dpm_dispatch_task(adev, 1341 AMD_PP_TASK_READJUST_POWER_STATE, 1342 NULL) == -EOPNOTSUPP) { 1343 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1344 amdgpu_dpm_compute_clocks(adev); 1345 } 1346 1347 return 0; 1348 } 1349 1350 int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev) 1351 { 1352 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1353 int ret = 0; 1354 1355 if (!pp_funcs->get_mclk_od) 1356 return -EOPNOTSUPP; 1357 1358 mutex_lock(&adev->pm.mutex); 1359 ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle); 1360 mutex_unlock(&adev->pm.mutex); 1361 1362 return ret; 1363 } 1364 1365 int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) 1366 { 1367 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1368 1369 if (is_support_sw_smu(adev)) 1370 return -EOPNOTSUPP; 1371 1372 mutex_lock(&adev->pm.mutex); 1373 if (pp_funcs->set_mclk_od) 1374 pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value); 1375 mutex_unlock(&adev->pm.mutex); 1376 1377 if (amdgpu_dpm_dispatch_task(adev, 1378 AMD_PP_TASK_READJUST_POWER_STATE, 1379 NULL) == -EOPNOTSUPP) { 1380 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1381 amdgpu_dpm_compute_clocks(adev); 1382 } 1383 1384 return 0; 1385 } 1386 1387 int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev, 1388 char *buf) 1389 { 1390 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1391 int ret = 0; 1392 1393 if (!pp_funcs->get_power_profile_mode) 1394 return -EOPNOTSUPP; 1395 1396 mutex_lock(&adev->pm.mutex); 1397 ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle, 1398 buf); 1399 mutex_unlock(&adev->pm.mutex); 1400 1401 return ret; 1402 } 1403 1404 int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev, 1405 long *input, uint32_t size) 1406 { 1407 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1408 int ret = 0; 1409 1410 if (!pp_funcs->set_power_profile_mode) 1411 return 0; 1412 1413 mutex_lock(&adev->pm.mutex); 1414 ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle, 1415 input, 1416 size); 1417 mutex_unlock(&adev->pm.mutex); 1418 1419 return ret; 1420 } 1421 1422 int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table) 1423 { 1424 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1425 int ret = 0; 1426 1427 if (!pp_funcs->get_gpu_metrics) 1428 return 0; 1429 1430 mutex_lock(&adev->pm.mutex); 1431 ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle, 1432 table); 1433 mutex_unlock(&adev->pm.mutex); 1434 1435 return ret; 1436 } 1437 1438 ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics, 1439 size_t size) 1440 { 1441 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1442 int ret = 0; 1443 1444 if (!pp_funcs->get_pm_metrics) 1445 return -EOPNOTSUPP; 1446 1447 mutex_lock(&adev->pm.mutex); 1448 ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics, 1449 size); 1450 mutex_unlock(&adev->pm.mutex); 1451 1452 return ret; 1453 } 1454 1455 int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev, 1456 uint32_t *fan_mode) 1457 { 1458 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1459 int ret = 0; 1460 1461 if (!pp_funcs->get_fan_control_mode) 1462 return -EOPNOTSUPP; 1463 1464 mutex_lock(&adev->pm.mutex); 1465 ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle, 1466 fan_mode); 1467 mutex_unlock(&adev->pm.mutex); 1468 1469 return ret; 1470 } 1471 1472 int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev, 1473 uint32_t speed) 1474 { 1475 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1476 int ret = 0; 1477 1478 if (!pp_funcs->set_fan_speed_pwm) 1479 return -EOPNOTSUPP; 1480 1481 mutex_lock(&adev->pm.mutex); 1482 ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle, 1483 speed); 1484 mutex_unlock(&adev->pm.mutex); 1485 1486 return ret; 1487 } 1488 1489 int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev, 1490 uint32_t *speed) 1491 { 1492 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1493 int ret = 0; 1494 1495 if (!pp_funcs->get_fan_speed_pwm) 1496 return -EOPNOTSUPP; 1497 1498 mutex_lock(&adev->pm.mutex); 1499 ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle, 1500 speed); 1501 mutex_unlock(&adev->pm.mutex); 1502 1503 return ret; 1504 } 1505 1506 int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev, 1507 uint32_t *speed) 1508 { 1509 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1510 int ret = 0; 1511 1512 if (!pp_funcs->get_fan_speed_rpm) 1513 return -EOPNOTSUPP; 1514 1515 mutex_lock(&adev->pm.mutex); 1516 ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle, 1517 speed); 1518 mutex_unlock(&adev->pm.mutex); 1519 1520 return ret; 1521 } 1522 1523 int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev, 1524 uint32_t speed) 1525 { 1526 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1527 int ret = 0; 1528 1529 if (!pp_funcs->set_fan_speed_rpm) 1530 return -EOPNOTSUPP; 1531 1532 mutex_lock(&adev->pm.mutex); 1533 ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle, 1534 speed); 1535 mutex_unlock(&adev->pm.mutex); 1536 1537 return ret; 1538 } 1539 1540 int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev, 1541 uint32_t mode) 1542 { 1543 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1544 int ret = 0; 1545 1546 if (!pp_funcs->set_fan_control_mode) 1547 return -EOPNOTSUPP; 1548 1549 mutex_lock(&adev->pm.mutex); 1550 ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle, 1551 mode); 1552 mutex_unlock(&adev->pm.mutex); 1553 1554 return ret; 1555 } 1556 1557 int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, 1558 uint32_t *limit, 1559 enum pp_power_limit_level pp_limit_level, 1560 enum pp_power_type power_type) 1561 { 1562 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1563 int ret = 0; 1564 1565 if (!pp_funcs->get_power_limit) 1566 return -ENODATA; 1567 1568 mutex_lock(&adev->pm.mutex); 1569 ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle, 1570 limit, 1571 pp_limit_level, 1572 power_type); 1573 mutex_unlock(&adev->pm.mutex); 1574 1575 return ret; 1576 } 1577 1578 int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, 1579 uint32_t limit) 1580 { 1581 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1582 int ret = 0; 1583 1584 if (!pp_funcs->set_power_limit) 1585 return -EINVAL; 1586 1587 mutex_lock(&adev->pm.mutex); 1588 ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle, 1589 limit); 1590 mutex_unlock(&adev->pm.mutex); 1591 1592 return ret; 1593 } 1594 1595 int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev) 1596 { 1597 bool cclk_dpm_supported = false; 1598 1599 if (!is_support_sw_smu(adev)) 1600 return false; 1601 1602 mutex_lock(&adev->pm.mutex); 1603 cclk_dpm_supported = is_support_cclk_dpm(adev); 1604 mutex_unlock(&adev->pm.mutex); 1605 1606 return (int)cclk_dpm_supported; 1607 } 1608 1609 int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 1610 struct seq_file *m) 1611 { 1612 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1613 1614 if (!pp_funcs->debugfs_print_current_performance_level) 1615 return -EOPNOTSUPP; 1616 1617 mutex_lock(&adev->pm.mutex); 1618 pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle, 1619 m); 1620 mutex_unlock(&adev->pm.mutex); 1621 1622 return 0; 1623 } 1624 1625 int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, 1626 void **addr, 1627 size_t *size) 1628 { 1629 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1630 int ret = 0; 1631 1632 if (!pp_funcs->get_smu_prv_buf_details) 1633 return -ENOSYS; 1634 1635 mutex_lock(&adev->pm.mutex); 1636 ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle, 1637 addr, 1638 size); 1639 mutex_unlock(&adev->pm.mutex); 1640 1641 return ret; 1642 } 1643 1644 int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) 1645 { 1646 if (is_support_sw_smu(adev)) { 1647 struct smu_context *smu = adev->powerplay.pp_handle; 1648 1649 return (smu->od_enabled || smu->is_apu); 1650 } else { 1651 struct pp_hwmgr *hwmgr; 1652 1653 /* 1654 * dpm on some legacy asics don't carry od_enabled member 1655 * as its pp_handle is casted directly from adev. 1656 */ 1657 if (amdgpu_dpm_is_legacy_dpm(adev)) 1658 return false; 1659 1660 hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle; 1661 1662 return hwmgr->od_enabled; 1663 } 1664 } 1665 1666 int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, 1667 const char *buf, 1668 size_t size) 1669 { 1670 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1671 int ret = 0; 1672 1673 if (!pp_funcs->set_pp_table) 1674 return -EOPNOTSUPP; 1675 1676 mutex_lock(&adev->pm.mutex); 1677 ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle, 1678 buf, 1679 size); 1680 mutex_unlock(&adev->pm.mutex); 1681 1682 return ret; 1683 } 1684 1685 int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev) 1686 { 1687 struct smu_context *smu = adev->powerplay.pp_handle; 1688 1689 if (!is_support_sw_smu(adev)) 1690 return INT_MAX; 1691 1692 return smu->cpu_core_num; 1693 } 1694 1695 void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev) 1696 { 1697 if (!is_support_sw_smu(adev)) 1698 return; 1699 1700 amdgpu_smu_stb_debug_fs_init(adev); 1701 } 1702 1703 int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev, 1704 const struct amd_pp_display_configuration *input) 1705 { 1706 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1707 int ret = 0; 1708 1709 if (!pp_funcs->display_configuration_change) 1710 return 0; 1711 1712 mutex_lock(&adev->pm.mutex); 1713 ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle, 1714 input); 1715 mutex_unlock(&adev->pm.mutex); 1716 1717 return ret; 1718 } 1719 1720 int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev, 1721 enum amd_pp_clock_type type, 1722 struct amd_pp_clocks *clocks) 1723 { 1724 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1725 int ret = 0; 1726 1727 if (!pp_funcs->get_clock_by_type) 1728 return 0; 1729 1730 mutex_lock(&adev->pm.mutex); 1731 ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle, 1732 type, 1733 clocks); 1734 mutex_unlock(&adev->pm.mutex); 1735 1736 return ret; 1737 } 1738 1739 int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev, 1740 struct amd_pp_simple_clock_info *clocks) 1741 { 1742 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1743 int ret = 0; 1744 1745 if (!pp_funcs->get_display_mode_validation_clocks) 1746 return 0; 1747 1748 mutex_lock(&adev->pm.mutex); 1749 ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle, 1750 clocks); 1751 mutex_unlock(&adev->pm.mutex); 1752 1753 return ret; 1754 } 1755 1756 int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev, 1757 enum amd_pp_clock_type type, 1758 struct pp_clock_levels_with_latency *clocks) 1759 { 1760 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1761 int ret = 0; 1762 1763 if (!pp_funcs->get_clock_by_type_with_latency) 1764 return 0; 1765 1766 mutex_lock(&adev->pm.mutex); 1767 ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle, 1768 type, 1769 clocks); 1770 mutex_unlock(&adev->pm.mutex); 1771 1772 return ret; 1773 } 1774 1775 int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev, 1776 enum amd_pp_clock_type type, 1777 struct pp_clock_levels_with_voltage *clocks) 1778 { 1779 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1780 int ret = 0; 1781 1782 if (!pp_funcs->get_clock_by_type_with_voltage) 1783 return 0; 1784 1785 mutex_lock(&adev->pm.mutex); 1786 ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle, 1787 type, 1788 clocks); 1789 mutex_unlock(&adev->pm.mutex); 1790 1791 return ret; 1792 } 1793 1794 int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev, 1795 void *clock_ranges) 1796 { 1797 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1798 int ret = 0; 1799 1800 if (!pp_funcs->set_watermarks_for_clocks_ranges) 1801 return -EOPNOTSUPP; 1802 1803 mutex_lock(&adev->pm.mutex); 1804 ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle, 1805 clock_ranges); 1806 mutex_unlock(&adev->pm.mutex); 1807 1808 return ret; 1809 } 1810 1811 int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev, 1812 struct pp_display_clock_request *clock) 1813 { 1814 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1815 int ret = 0; 1816 1817 if (!pp_funcs->display_clock_voltage_request) 1818 return -EOPNOTSUPP; 1819 1820 mutex_lock(&adev->pm.mutex); 1821 ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle, 1822 clock); 1823 mutex_unlock(&adev->pm.mutex); 1824 1825 return ret; 1826 } 1827 1828 int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev, 1829 struct amd_pp_clock_info *clocks) 1830 { 1831 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1832 int ret = 0; 1833 1834 if (!pp_funcs->get_current_clocks) 1835 return -EOPNOTSUPP; 1836 1837 mutex_lock(&adev->pm.mutex); 1838 ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle, 1839 clocks); 1840 mutex_unlock(&adev->pm.mutex); 1841 1842 return ret; 1843 } 1844 1845 void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev) 1846 { 1847 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1848 1849 if (!pp_funcs->notify_smu_enable_pwe) 1850 return; 1851 1852 mutex_lock(&adev->pm.mutex); 1853 pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle); 1854 mutex_unlock(&adev->pm.mutex); 1855 } 1856 1857 int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev, 1858 uint32_t count) 1859 { 1860 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1861 int ret = 0; 1862 1863 if (!pp_funcs->set_active_display_count) 1864 return -EOPNOTSUPP; 1865 1866 mutex_lock(&adev->pm.mutex); 1867 ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle, 1868 count); 1869 mutex_unlock(&adev->pm.mutex); 1870 1871 return ret; 1872 } 1873 1874 int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev, 1875 uint32_t clock) 1876 { 1877 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1878 int ret = 0; 1879 1880 if (!pp_funcs->set_min_deep_sleep_dcefclk) 1881 return -EOPNOTSUPP; 1882 1883 mutex_lock(&adev->pm.mutex); 1884 ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle, 1885 clock); 1886 mutex_unlock(&adev->pm.mutex); 1887 1888 return ret; 1889 } 1890 1891 void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev, 1892 uint32_t clock) 1893 { 1894 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1895 1896 if (!pp_funcs->set_hard_min_dcefclk_by_freq) 1897 return; 1898 1899 mutex_lock(&adev->pm.mutex); 1900 pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle, 1901 clock); 1902 mutex_unlock(&adev->pm.mutex); 1903 } 1904 1905 void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev, 1906 uint32_t clock) 1907 { 1908 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1909 1910 if (!pp_funcs->set_hard_min_fclk_by_freq) 1911 return; 1912 1913 mutex_lock(&adev->pm.mutex); 1914 pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle, 1915 clock); 1916 mutex_unlock(&adev->pm.mutex); 1917 } 1918 1919 int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev, 1920 bool disable_memory_clock_switch) 1921 { 1922 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1923 int ret = 0; 1924 1925 if (!pp_funcs->display_disable_memory_clock_switch) 1926 return 0; 1927 1928 mutex_lock(&adev->pm.mutex); 1929 ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle, 1930 disable_memory_clock_switch); 1931 mutex_unlock(&adev->pm.mutex); 1932 1933 return ret; 1934 } 1935 1936 int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev, 1937 struct pp_smu_nv_clock_table *max_clocks) 1938 { 1939 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1940 int ret = 0; 1941 1942 if (!pp_funcs->get_max_sustainable_clocks_by_dc) 1943 return -EOPNOTSUPP; 1944 1945 mutex_lock(&adev->pm.mutex); 1946 ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle, 1947 max_clocks); 1948 mutex_unlock(&adev->pm.mutex); 1949 1950 return ret; 1951 } 1952 1953 enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev, 1954 unsigned int *clock_values_in_khz, 1955 unsigned int *num_states) 1956 { 1957 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1958 int ret = 0; 1959 1960 if (!pp_funcs->get_uclk_dpm_states) 1961 return -EOPNOTSUPP; 1962 1963 mutex_lock(&adev->pm.mutex); 1964 ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle, 1965 clock_values_in_khz, 1966 num_states); 1967 mutex_unlock(&adev->pm.mutex); 1968 1969 return ret; 1970 } 1971 1972 int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev, 1973 struct dpm_clocks *clock_table) 1974 { 1975 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1976 int ret = 0; 1977 1978 if (!pp_funcs->get_dpm_clock_table) 1979 return -EOPNOTSUPP; 1980 1981 mutex_lock(&adev->pm.mutex); 1982 ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle, 1983 clock_table); 1984 mutex_unlock(&adev->pm.mutex); 1985 1986 return ret; 1987 } 1988