1 /* 2 * Copyright 2011 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Alex Deucher 23 */ 24 25 #include "amdgpu.h" 26 #include "amdgpu_atombios.h" 27 #include "amdgpu_i2c.h" 28 #include "amdgpu_dpm.h" 29 #include "atom.h" 30 #include "amd_pcie.h" 31 #include "amdgpu_display.h" 32 #include "hwmgr.h" 33 #include <linux/power_supply.h> 34 #include "amdgpu_smu.h" 35 36 #define amdgpu_dpm_enable_bapm(adev, e) \ 37 ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) 38 39 #define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev)) 40 41 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) 42 { 43 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 44 int ret = 0; 45 46 if (!pp_funcs->get_sclk) 47 return 0; 48 49 mutex_lock(&adev->pm.mutex); 50 ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle, 51 low); 52 mutex_unlock(&adev->pm.mutex); 53 54 return ret; 55 } 56 57 int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low) 58 { 59 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 60 int ret = 0; 61 62 if (!pp_funcs->get_mclk) 63 return 0; 64 65 mutex_lock(&adev->pm.mutex); 66 ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle, 67 low); 68 mutex_unlock(&adev->pm.mutex); 69 70 return ret; 71 } 72 73 int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, 74 uint32_t block_type, 75 bool gate, 76 int inst) 77 { 78 int ret = 0; 79 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 80 enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON; 81 bool is_vcn = (block_type == AMD_IP_BLOCK_TYPE_UVD || block_type == AMD_IP_BLOCK_TYPE_VCN); 82 83 if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state && 84 (!is_vcn || adev->vcn.num_vcn_inst == 1)) { 85 dev_dbg(adev->dev, "IP block%d already in the target %s state!", 86 block_type, gate ? "gate" : "ungate"); 87 return 0; 88 } 89 90 mutex_lock(&adev->pm.mutex); 91 92 switch (block_type) { 93 case AMD_IP_BLOCK_TYPE_UVD: 94 case AMD_IP_BLOCK_TYPE_VCE: 95 case AMD_IP_BLOCK_TYPE_GFX: 96 case AMD_IP_BLOCK_TYPE_SDMA: 97 case AMD_IP_BLOCK_TYPE_JPEG: 98 case AMD_IP_BLOCK_TYPE_GMC: 99 case AMD_IP_BLOCK_TYPE_ACP: 100 case AMD_IP_BLOCK_TYPE_VPE: 101 if (pp_funcs && pp_funcs->set_powergating_by_smu) 102 ret = (pp_funcs->set_powergating_by_smu( 103 (adev)->powerplay.pp_handle, block_type, gate, 0)); 104 break; 105 case AMD_IP_BLOCK_TYPE_VCN: 106 if (pp_funcs && pp_funcs->set_powergating_by_smu) 107 ret = (pp_funcs->set_powergating_by_smu( 108 (adev)->powerplay.pp_handle, block_type, gate, inst)); 109 break; 110 default: 111 break; 112 } 113 114 if (!ret) 115 atomic_set(&adev->pm.pwr_state[block_type], pwr_state); 116 117 mutex_unlock(&adev->pm.mutex); 118 119 return ret; 120 } 121 122 int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev) 123 { 124 struct smu_context *smu = adev->powerplay.pp_handle; 125 int ret = -EOPNOTSUPP; 126 127 mutex_lock(&adev->pm.mutex); 128 ret = smu_set_gfx_power_up_by_imu(smu); 129 mutex_unlock(&adev->pm.mutex); 130 131 msleep(10); 132 133 return ret; 134 } 135 136 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev) 137 { 138 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 139 void *pp_handle = adev->powerplay.pp_handle; 140 int ret = 0; 141 142 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 143 return -ENOENT; 144 145 mutex_lock(&adev->pm.mutex); 146 147 /* enter BACO state */ 148 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 149 150 mutex_unlock(&adev->pm.mutex); 151 152 return ret; 153 } 154 155 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev) 156 { 157 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 158 void *pp_handle = adev->powerplay.pp_handle; 159 int ret = 0; 160 161 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 162 return -ENOENT; 163 164 mutex_lock(&adev->pm.mutex); 165 166 /* exit BACO state */ 167 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 168 169 mutex_unlock(&adev->pm.mutex); 170 171 return ret; 172 } 173 174 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, 175 enum pp_mp1_state mp1_state) 176 { 177 int ret = 0; 178 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 179 180 if (mp1_state == PP_MP1_STATE_FLR) { 181 /* VF lost access to SMU */ 182 if (amdgpu_sriov_vf(adev)) 183 adev->pm.dpm_enabled = false; 184 } else if (pp_funcs && pp_funcs->set_mp1_state) { 185 mutex_lock(&adev->pm.mutex); 186 187 ret = pp_funcs->set_mp1_state( 188 adev->powerplay.pp_handle, 189 mp1_state); 190 191 mutex_unlock(&adev->pm.mutex); 192 } 193 194 return ret; 195 } 196 197 int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en) 198 { 199 int ret = 0; 200 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 201 202 if (pp_funcs && pp_funcs->notify_rlc_state) { 203 mutex_lock(&adev->pm.mutex); 204 205 ret = pp_funcs->notify_rlc_state( 206 adev->powerplay.pp_handle, 207 en); 208 209 mutex_unlock(&adev->pm.mutex); 210 } 211 212 return ret; 213 } 214 215 int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) 216 { 217 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 218 void *pp_handle = adev->powerplay.pp_handle; 219 int ret; 220 221 if (!pp_funcs || !pp_funcs->get_asic_baco_capability) 222 return 0; 223 /* Don't use baco for reset in S3. 224 * This is a workaround for some platforms 225 * where entering BACO during suspend 226 * seems to cause reboots or hangs. 227 * This might be related to the fact that BACO controls 228 * power to the whole GPU including devices like audio and USB. 229 * Powering down/up everything may adversely affect these other 230 * devices. Needs more investigation. 231 */ 232 if (adev->in_s3) 233 return 0; 234 235 mutex_lock(&adev->pm.mutex); 236 237 ret = pp_funcs->get_asic_baco_capability(pp_handle); 238 239 mutex_unlock(&adev->pm.mutex); 240 241 return ret; 242 } 243 244 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev) 245 { 246 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 247 void *pp_handle = adev->powerplay.pp_handle; 248 int ret = 0; 249 250 if (!pp_funcs || !pp_funcs->asic_reset_mode_2) 251 return -ENOENT; 252 253 mutex_lock(&adev->pm.mutex); 254 255 ret = pp_funcs->asic_reset_mode_2(pp_handle); 256 257 mutex_unlock(&adev->pm.mutex); 258 259 return ret; 260 } 261 262 int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev) 263 { 264 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 265 void *pp_handle = adev->powerplay.pp_handle; 266 int ret = 0; 267 268 if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features) 269 return -ENOENT; 270 271 mutex_lock(&adev->pm.mutex); 272 273 ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle); 274 275 mutex_unlock(&adev->pm.mutex); 276 277 return ret; 278 } 279 280 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev) 281 { 282 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 283 void *pp_handle = adev->powerplay.pp_handle; 284 int ret = 0; 285 286 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 287 return -ENOENT; 288 289 mutex_lock(&adev->pm.mutex); 290 291 /* enter BACO state */ 292 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 293 if (ret) 294 goto out; 295 296 /* exit BACO state */ 297 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 298 299 out: 300 mutex_unlock(&adev->pm.mutex); 301 return ret; 302 } 303 304 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev) 305 { 306 struct smu_context *smu = adev->powerplay.pp_handle; 307 bool support_mode1_reset = false; 308 309 if (is_support_sw_smu(adev)) { 310 mutex_lock(&adev->pm.mutex); 311 support_mode1_reset = smu_mode1_reset_is_support(smu); 312 mutex_unlock(&adev->pm.mutex); 313 } 314 315 return support_mode1_reset; 316 } 317 318 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev) 319 { 320 struct smu_context *smu = adev->powerplay.pp_handle; 321 int ret = -EOPNOTSUPP; 322 323 if (is_support_sw_smu(adev)) { 324 mutex_lock(&adev->pm.mutex); 325 ret = smu_mode1_reset(smu); 326 mutex_unlock(&adev->pm.mutex); 327 } 328 329 return ret; 330 } 331 332 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, 333 enum PP_SMC_POWER_PROFILE type, 334 bool en) 335 { 336 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 337 int ret = 0; 338 339 if (amdgpu_sriov_vf(adev)) 340 return 0; 341 342 if (pp_funcs && pp_funcs->switch_power_profile) { 343 mutex_lock(&adev->pm.mutex); 344 ret = pp_funcs->switch_power_profile( 345 adev->powerplay.pp_handle, type, en); 346 mutex_unlock(&adev->pm.mutex); 347 } 348 349 return ret; 350 } 351 352 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, 353 uint32_t pstate) 354 { 355 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 356 int ret = 0; 357 358 if (pp_funcs && pp_funcs->set_xgmi_pstate) { 359 mutex_lock(&adev->pm.mutex); 360 ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle, 361 pstate); 362 mutex_unlock(&adev->pm.mutex); 363 } 364 365 return ret; 366 } 367 368 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev, 369 uint32_t cstate) 370 { 371 int ret = 0; 372 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 373 void *pp_handle = adev->powerplay.pp_handle; 374 375 if (pp_funcs && pp_funcs->set_df_cstate) { 376 mutex_lock(&adev->pm.mutex); 377 ret = pp_funcs->set_df_cstate(pp_handle, cstate); 378 mutex_unlock(&adev->pm.mutex); 379 } 380 381 return ret; 382 } 383 384 ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev, 385 enum pp_pm_policy p_type, char *buf) 386 { 387 struct smu_context *smu = adev->powerplay.pp_handle; 388 int ret = -EOPNOTSUPP; 389 390 if (is_support_sw_smu(adev)) { 391 mutex_lock(&adev->pm.mutex); 392 ret = smu_get_pm_policy_info(smu, p_type, buf); 393 mutex_unlock(&adev->pm.mutex); 394 } 395 396 return ret; 397 } 398 399 int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type, 400 int policy_level) 401 { 402 struct smu_context *smu = adev->powerplay.pp_handle; 403 int ret = -EOPNOTSUPP; 404 405 if (is_support_sw_smu(adev)) { 406 mutex_lock(&adev->pm.mutex); 407 ret = smu_set_pm_policy(smu, policy_type, policy_level); 408 mutex_unlock(&adev->pm.mutex); 409 } 410 411 return ret; 412 } 413 414 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev) 415 { 416 void *pp_handle = adev->powerplay.pp_handle; 417 const struct amd_pm_funcs *pp_funcs = 418 adev->powerplay.pp_funcs; 419 int ret = 0; 420 421 if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) { 422 mutex_lock(&adev->pm.mutex); 423 ret = pp_funcs->enable_mgpu_fan_boost(pp_handle); 424 mutex_unlock(&adev->pm.mutex); 425 } 426 427 return ret; 428 } 429 430 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev, 431 uint32_t msg_id) 432 { 433 void *pp_handle = adev->powerplay.pp_handle; 434 const struct amd_pm_funcs *pp_funcs = 435 adev->powerplay.pp_funcs; 436 int ret = 0; 437 438 if (pp_funcs && pp_funcs->set_clockgating_by_smu) { 439 mutex_lock(&adev->pm.mutex); 440 ret = pp_funcs->set_clockgating_by_smu(pp_handle, 441 msg_id); 442 mutex_unlock(&adev->pm.mutex); 443 } 444 445 return ret; 446 } 447 448 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev, 449 bool acquire) 450 { 451 void *pp_handle = adev->powerplay.pp_handle; 452 const struct amd_pm_funcs *pp_funcs = 453 adev->powerplay.pp_funcs; 454 int ret = -EOPNOTSUPP; 455 456 if (pp_funcs && pp_funcs->smu_i2c_bus_access) { 457 mutex_lock(&adev->pm.mutex); 458 ret = pp_funcs->smu_i2c_bus_access(pp_handle, 459 acquire); 460 mutex_unlock(&adev->pm.mutex); 461 } 462 463 return ret; 464 } 465 466 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 467 { 468 if (adev->pm.dpm_enabled) { 469 mutex_lock(&adev->pm.mutex); 470 if (power_supply_is_system_supplied() > 0) 471 adev->pm.ac_power = true; 472 else 473 adev->pm.ac_power = false; 474 475 if (adev->powerplay.pp_funcs && 476 adev->powerplay.pp_funcs->enable_bapm) 477 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); 478 479 if (is_support_sw_smu(adev)) 480 smu_set_ac_dc(adev->powerplay.pp_handle); 481 482 mutex_unlock(&adev->pm.mutex); 483 } 484 } 485 486 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, 487 void *data, uint32_t *size) 488 { 489 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 490 int ret = -EINVAL; 491 492 if (!data || !size) 493 return -EINVAL; 494 495 if (pp_funcs && pp_funcs->read_sensor) { 496 mutex_lock(&adev->pm.mutex); 497 ret = pp_funcs->read_sensor(adev->powerplay.pp_handle, 498 sensor, 499 data, 500 size); 501 mutex_unlock(&adev->pm.mutex); 502 } 503 504 return ret; 505 } 506 507 int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit) 508 { 509 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 510 int ret = -EOPNOTSUPP; 511 512 if (pp_funcs && pp_funcs->get_apu_thermal_limit) { 513 mutex_lock(&adev->pm.mutex); 514 ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit); 515 mutex_unlock(&adev->pm.mutex); 516 } 517 518 return ret; 519 } 520 521 int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit) 522 { 523 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 524 int ret = -EOPNOTSUPP; 525 526 if (pp_funcs && pp_funcs->set_apu_thermal_limit) { 527 mutex_lock(&adev->pm.mutex); 528 ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit); 529 mutex_unlock(&adev->pm.mutex); 530 } 531 532 return ret; 533 } 534 535 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev) 536 { 537 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 538 int i; 539 540 if (!adev->pm.dpm_enabled) 541 return; 542 543 if (!pp_funcs->pm_compute_clocks) 544 return; 545 546 if (adev->mode_info.num_crtc) 547 amdgpu_display_bandwidth_update(adev); 548 549 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 550 struct amdgpu_ring *ring = adev->rings[i]; 551 if (ring && ring->sched.ready) 552 amdgpu_fence_wait_empty(ring); 553 } 554 555 mutex_lock(&adev->pm.mutex); 556 pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle); 557 mutex_unlock(&adev->pm.mutex); 558 } 559 560 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 561 { 562 int ret = 0; 563 564 if (adev->family == AMDGPU_FAMILY_SI) { 565 mutex_lock(&adev->pm.mutex); 566 if (enable) { 567 adev->pm.dpm.uvd_active = true; 568 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; 569 } else { 570 adev->pm.dpm.uvd_active = false; 571 } 572 mutex_unlock(&adev->pm.mutex); 573 574 amdgpu_dpm_compute_clocks(adev); 575 return; 576 } 577 578 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable, 0); 579 if (ret) 580 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 581 enable ? "enable" : "disable", ret); 582 } 583 584 void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst) 585 { 586 int ret = 0; 587 588 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, !enable, inst); 589 if (ret) 590 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 591 enable ? "enable" : "disable", ret); 592 } 593 594 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 595 { 596 int ret = 0; 597 598 if (adev->family == AMDGPU_FAMILY_SI) { 599 mutex_lock(&adev->pm.mutex); 600 if (enable) { 601 adev->pm.dpm.vce_active = true; 602 /* XXX select vce level based on ring/task */ 603 adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; 604 } else { 605 adev->pm.dpm.vce_active = false; 606 } 607 mutex_unlock(&adev->pm.mutex); 608 609 amdgpu_dpm_compute_clocks(adev); 610 return; 611 } 612 613 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable, 0); 614 if (ret) 615 DRM_ERROR("Dpm %s vce failed, ret = %d. \n", 616 enable ? "enable" : "disable", ret); 617 } 618 619 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) 620 { 621 int ret = 0; 622 623 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable, 0); 624 if (ret) 625 DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", 626 enable ? "enable" : "disable", ret); 627 } 628 629 void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable) 630 { 631 int ret = 0; 632 633 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable, 0); 634 if (ret) 635 DRM_ERROR("Dpm %s vpe failed, ret = %d.\n", 636 enable ? "enable" : "disable", ret); 637 } 638 639 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) 640 { 641 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 642 int r = 0; 643 644 if (!pp_funcs || !pp_funcs->load_firmware || 645 (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU))) 646 return 0; 647 648 mutex_lock(&adev->pm.mutex); 649 r = pp_funcs->load_firmware(adev->powerplay.pp_handle); 650 if (r) { 651 pr_err("smu firmware loading failed\n"); 652 goto out; 653 } 654 655 if (smu_version) 656 *smu_version = adev->pm.fw_version; 657 658 out: 659 mutex_unlock(&adev->pm.mutex); 660 return r; 661 } 662 663 int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable) 664 { 665 int ret = 0; 666 667 if (is_support_sw_smu(adev)) { 668 mutex_lock(&adev->pm.mutex); 669 ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle, 670 enable); 671 mutex_unlock(&adev->pm.mutex); 672 } 673 674 return ret; 675 } 676 677 int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size) 678 { 679 struct smu_context *smu = adev->powerplay.pp_handle; 680 int ret = 0; 681 682 if (!is_support_sw_smu(adev)) 683 return -EOPNOTSUPP; 684 685 mutex_lock(&adev->pm.mutex); 686 ret = smu_send_hbm_bad_pages_num(smu, size); 687 mutex_unlock(&adev->pm.mutex); 688 689 return ret; 690 } 691 692 int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size) 693 { 694 struct smu_context *smu = adev->powerplay.pp_handle; 695 int ret = 0; 696 697 if (!is_support_sw_smu(adev)) 698 return -EOPNOTSUPP; 699 700 mutex_lock(&adev->pm.mutex); 701 ret = smu_send_hbm_bad_channel_flag(smu, size); 702 mutex_unlock(&adev->pm.mutex); 703 704 return ret; 705 } 706 707 int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev) 708 { 709 struct smu_context *smu = adev->powerplay.pp_handle; 710 int ret; 711 712 if (!is_support_sw_smu(adev)) 713 return -EOPNOTSUPP; 714 715 mutex_lock(&adev->pm.mutex); 716 ret = smu_send_rma_reason(smu); 717 mutex_unlock(&adev->pm.mutex); 718 719 return ret; 720 } 721 722 int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask) 723 { 724 struct smu_context *smu = adev->powerplay.pp_handle; 725 int ret; 726 727 if (!is_support_sw_smu(adev)) 728 return -EOPNOTSUPP; 729 730 mutex_lock(&adev->pm.mutex); 731 ret = smu_reset_sdma(smu, inst_mask); 732 mutex_unlock(&adev->pm.mutex); 733 734 return ret; 735 } 736 737 int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, 738 enum pp_clock_type type, 739 uint32_t *min, 740 uint32_t *max) 741 { 742 int ret = 0; 743 744 if (type != PP_SCLK) 745 return -EINVAL; 746 747 if (!is_support_sw_smu(adev)) 748 return -EOPNOTSUPP; 749 750 mutex_lock(&adev->pm.mutex); 751 ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle, 752 SMU_SCLK, 753 min, 754 max); 755 mutex_unlock(&adev->pm.mutex); 756 757 return ret; 758 } 759 760 int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, 761 enum pp_clock_type type, 762 uint32_t min, 763 uint32_t max) 764 { 765 struct smu_context *smu = adev->powerplay.pp_handle; 766 int ret = 0; 767 768 if (type != PP_SCLK) 769 return -EINVAL; 770 771 if (!is_support_sw_smu(adev)) 772 return -EOPNOTSUPP; 773 774 mutex_lock(&adev->pm.mutex); 775 ret = smu_set_soft_freq_range(smu, 776 SMU_SCLK, 777 min, 778 max); 779 mutex_unlock(&adev->pm.mutex); 780 781 return ret; 782 } 783 784 int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev) 785 { 786 struct smu_context *smu = adev->powerplay.pp_handle; 787 int ret = 0; 788 789 if (!is_support_sw_smu(adev)) 790 return 0; 791 792 mutex_lock(&adev->pm.mutex); 793 ret = smu_write_watermarks_table(smu); 794 mutex_unlock(&adev->pm.mutex); 795 796 return ret; 797 } 798 799 int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, 800 enum smu_event_type event, 801 uint64_t event_arg) 802 { 803 struct smu_context *smu = adev->powerplay.pp_handle; 804 int ret = 0; 805 806 if (!is_support_sw_smu(adev)) 807 return -EOPNOTSUPP; 808 809 mutex_lock(&adev->pm.mutex); 810 ret = smu_wait_for_event(smu, event, event_arg); 811 mutex_unlock(&adev->pm.mutex); 812 813 return ret; 814 } 815 816 int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value) 817 { 818 struct smu_context *smu = adev->powerplay.pp_handle; 819 int ret = 0; 820 821 if (!is_support_sw_smu(adev)) 822 return -EOPNOTSUPP; 823 824 mutex_lock(&adev->pm.mutex); 825 ret = smu_set_residency_gfxoff(smu, value); 826 mutex_unlock(&adev->pm.mutex); 827 828 return ret; 829 } 830 831 int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value) 832 { 833 struct smu_context *smu = adev->powerplay.pp_handle; 834 int ret = 0; 835 836 if (!is_support_sw_smu(adev)) 837 return -EOPNOTSUPP; 838 839 mutex_lock(&adev->pm.mutex); 840 ret = smu_get_residency_gfxoff(smu, value); 841 mutex_unlock(&adev->pm.mutex); 842 843 return ret; 844 } 845 846 int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value) 847 { 848 struct smu_context *smu = adev->powerplay.pp_handle; 849 int ret = 0; 850 851 if (!is_support_sw_smu(adev)) 852 return -EOPNOTSUPP; 853 854 mutex_lock(&adev->pm.mutex); 855 ret = smu_get_entrycount_gfxoff(smu, value); 856 mutex_unlock(&adev->pm.mutex); 857 858 return ret; 859 } 860 861 int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value) 862 { 863 struct smu_context *smu = adev->powerplay.pp_handle; 864 int ret = 0; 865 866 if (!is_support_sw_smu(adev)) 867 return -EOPNOTSUPP; 868 869 mutex_lock(&adev->pm.mutex); 870 ret = smu_get_status_gfxoff(smu, value); 871 mutex_unlock(&adev->pm.mutex); 872 873 return ret; 874 } 875 876 uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev) 877 { 878 struct smu_context *smu = adev->powerplay.pp_handle; 879 880 if (!is_support_sw_smu(adev)) 881 return 0; 882 883 return atomic64_read(&smu->throttle_int_counter); 884 } 885 886 /* amdgpu_dpm_gfx_state_change - Handle gfx power state change set 887 * @adev: amdgpu_device pointer 888 * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry) 889 * 890 */ 891 void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev, 892 enum gfx_change_state state) 893 { 894 mutex_lock(&adev->pm.mutex); 895 if (adev->powerplay.pp_funcs && 896 adev->powerplay.pp_funcs->gfx_state_change_set) 897 ((adev)->powerplay.pp_funcs->gfx_state_change_set( 898 (adev)->powerplay.pp_handle, state)); 899 mutex_unlock(&adev->pm.mutex); 900 } 901 902 int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, 903 void *umc_ecc) 904 { 905 struct smu_context *smu = adev->powerplay.pp_handle; 906 int ret = 0; 907 908 if (!is_support_sw_smu(adev)) 909 return -EOPNOTSUPP; 910 911 mutex_lock(&adev->pm.mutex); 912 ret = smu_get_ecc_info(smu, umc_ecc); 913 mutex_unlock(&adev->pm.mutex); 914 915 return ret; 916 } 917 918 struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev, 919 uint32_t idx) 920 { 921 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 922 struct amd_vce_state *vstate = NULL; 923 924 if (!pp_funcs->get_vce_clock_state) 925 return NULL; 926 927 mutex_lock(&adev->pm.mutex); 928 vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle, 929 idx); 930 mutex_unlock(&adev->pm.mutex); 931 932 return vstate; 933 } 934 935 void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, 936 enum amd_pm_state_type *state) 937 { 938 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 939 940 mutex_lock(&adev->pm.mutex); 941 942 if (!pp_funcs->get_current_power_state) { 943 *state = adev->pm.dpm.user_state; 944 goto out; 945 } 946 947 *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle); 948 if (*state < POWER_STATE_TYPE_DEFAULT || 949 *state > POWER_STATE_TYPE_INTERNAL_3DPERF) 950 *state = adev->pm.dpm.user_state; 951 952 out: 953 mutex_unlock(&adev->pm.mutex); 954 } 955 956 void amdgpu_dpm_set_power_state(struct amdgpu_device *adev, 957 enum amd_pm_state_type state) 958 { 959 mutex_lock(&adev->pm.mutex); 960 adev->pm.dpm.user_state = state; 961 mutex_unlock(&adev->pm.mutex); 962 963 if (is_support_sw_smu(adev)) 964 return; 965 966 if (amdgpu_dpm_dispatch_task(adev, 967 AMD_PP_TASK_ENABLE_USER_STATE, 968 &state) == -EOPNOTSUPP) 969 amdgpu_dpm_compute_clocks(adev); 970 } 971 972 enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev) 973 { 974 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 975 enum amd_dpm_forced_level level; 976 977 if (!pp_funcs) 978 return AMD_DPM_FORCED_LEVEL_AUTO; 979 980 mutex_lock(&adev->pm.mutex); 981 if (pp_funcs->get_performance_level) 982 level = pp_funcs->get_performance_level(adev->powerplay.pp_handle); 983 else 984 level = adev->pm.dpm.forced_level; 985 mutex_unlock(&adev->pm.mutex); 986 987 return level; 988 } 989 990 int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev, 991 enum amd_dpm_forced_level level) 992 { 993 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 994 enum amd_dpm_forced_level current_level; 995 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 996 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 997 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 998 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 999 1000 if (!pp_funcs || !pp_funcs->force_performance_level) 1001 return 0; 1002 1003 if (adev->pm.dpm.thermal_active) 1004 return -EINVAL; 1005 1006 current_level = amdgpu_dpm_get_performance_level(adev); 1007 if (current_level == level) 1008 return 0; 1009 1010 if (adev->asic_type == CHIP_RAVEN) { 1011 if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) { 1012 if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && 1013 level == AMD_DPM_FORCED_LEVEL_MANUAL) 1014 amdgpu_gfx_off_ctrl(adev, false); 1015 else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && 1016 level != AMD_DPM_FORCED_LEVEL_MANUAL) 1017 amdgpu_gfx_off_ctrl(adev, true); 1018 } 1019 } 1020 1021 if (!(current_level & profile_mode_mask) && 1022 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) 1023 return -EINVAL; 1024 1025 if (!(current_level & profile_mode_mask) && 1026 (level & profile_mode_mask)) { 1027 /* enter UMD Pstate */ 1028 amdgpu_device_ip_set_powergating_state(adev, 1029 AMD_IP_BLOCK_TYPE_GFX, 1030 AMD_PG_STATE_UNGATE); 1031 amdgpu_device_ip_set_clockgating_state(adev, 1032 AMD_IP_BLOCK_TYPE_GFX, 1033 AMD_CG_STATE_UNGATE); 1034 } else if ((current_level & profile_mode_mask) && 1035 !(level & profile_mode_mask)) { 1036 /* exit UMD Pstate */ 1037 amdgpu_device_ip_set_clockgating_state(adev, 1038 AMD_IP_BLOCK_TYPE_GFX, 1039 AMD_CG_STATE_GATE); 1040 amdgpu_device_ip_set_powergating_state(adev, 1041 AMD_IP_BLOCK_TYPE_GFX, 1042 AMD_PG_STATE_GATE); 1043 } 1044 1045 mutex_lock(&adev->pm.mutex); 1046 1047 if (pp_funcs->force_performance_level(adev->powerplay.pp_handle, 1048 level)) { 1049 mutex_unlock(&adev->pm.mutex); 1050 return -EINVAL; 1051 } 1052 1053 adev->pm.dpm.forced_level = level; 1054 1055 mutex_unlock(&adev->pm.mutex); 1056 1057 return 0; 1058 } 1059 1060 int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev, 1061 struct pp_states_info *states) 1062 { 1063 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1064 int ret = 0; 1065 1066 if (!pp_funcs->get_pp_num_states) 1067 return -EOPNOTSUPP; 1068 1069 mutex_lock(&adev->pm.mutex); 1070 ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle, 1071 states); 1072 mutex_unlock(&adev->pm.mutex); 1073 1074 return ret; 1075 } 1076 1077 int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev, 1078 enum amd_pp_task task_id, 1079 enum amd_pm_state_type *user_state) 1080 { 1081 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1082 int ret = 0; 1083 1084 if (!pp_funcs->dispatch_tasks) 1085 return -EOPNOTSUPP; 1086 1087 mutex_lock(&adev->pm.mutex); 1088 ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle, 1089 task_id, 1090 user_state); 1091 mutex_unlock(&adev->pm.mutex); 1092 1093 return ret; 1094 } 1095 1096 int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table) 1097 { 1098 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1099 int ret = 0; 1100 1101 if (!pp_funcs->get_pp_table) 1102 return 0; 1103 1104 mutex_lock(&adev->pm.mutex); 1105 ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle, 1106 table); 1107 mutex_unlock(&adev->pm.mutex); 1108 1109 return ret; 1110 } 1111 1112 int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev, 1113 uint32_t type, 1114 long *input, 1115 uint32_t size) 1116 { 1117 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1118 int ret = 0; 1119 1120 if (!pp_funcs->set_fine_grain_clk_vol) 1121 return 0; 1122 1123 mutex_lock(&adev->pm.mutex); 1124 ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle, 1125 type, 1126 input, 1127 size); 1128 mutex_unlock(&adev->pm.mutex); 1129 1130 return ret; 1131 } 1132 1133 int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev, 1134 uint32_t type, 1135 long *input, 1136 uint32_t size) 1137 { 1138 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1139 int ret = 0; 1140 1141 if (!pp_funcs->odn_edit_dpm_table) 1142 return 0; 1143 1144 mutex_lock(&adev->pm.mutex); 1145 ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle, 1146 type, 1147 input, 1148 size); 1149 mutex_unlock(&adev->pm.mutex); 1150 1151 return ret; 1152 } 1153 1154 int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev, 1155 enum pp_clock_type type, 1156 char *buf) 1157 { 1158 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1159 int ret = 0; 1160 1161 if (!pp_funcs->print_clock_levels) 1162 return 0; 1163 1164 mutex_lock(&adev->pm.mutex); 1165 ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle, 1166 type, 1167 buf); 1168 mutex_unlock(&adev->pm.mutex); 1169 1170 return ret; 1171 } 1172 1173 int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev, 1174 enum pp_clock_type type, 1175 char *buf, 1176 int *offset) 1177 { 1178 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1179 int ret = 0; 1180 1181 if (!pp_funcs->emit_clock_levels) 1182 return -ENOENT; 1183 1184 mutex_lock(&adev->pm.mutex); 1185 ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle, 1186 type, 1187 buf, 1188 offset); 1189 mutex_unlock(&adev->pm.mutex); 1190 1191 return ret; 1192 } 1193 1194 int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev, 1195 uint64_t ppfeature_masks) 1196 { 1197 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1198 int ret = 0; 1199 1200 if (!pp_funcs->set_ppfeature_status) 1201 return 0; 1202 1203 mutex_lock(&adev->pm.mutex); 1204 ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle, 1205 ppfeature_masks); 1206 mutex_unlock(&adev->pm.mutex); 1207 1208 return ret; 1209 } 1210 1211 int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf) 1212 { 1213 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1214 int ret = 0; 1215 1216 if (!pp_funcs->get_ppfeature_status) 1217 return 0; 1218 1219 mutex_lock(&adev->pm.mutex); 1220 ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle, 1221 buf); 1222 mutex_unlock(&adev->pm.mutex); 1223 1224 return ret; 1225 } 1226 1227 int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev, 1228 enum pp_clock_type type, 1229 uint32_t mask) 1230 { 1231 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1232 int ret = 0; 1233 1234 if (!pp_funcs->force_clock_level) 1235 return 0; 1236 1237 mutex_lock(&adev->pm.mutex); 1238 ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle, 1239 type, 1240 mask); 1241 mutex_unlock(&adev->pm.mutex); 1242 1243 return ret; 1244 } 1245 1246 int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev) 1247 { 1248 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1249 int ret = 0; 1250 1251 if (!pp_funcs->get_sclk_od) 1252 return -EOPNOTSUPP; 1253 1254 mutex_lock(&adev->pm.mutex); 1255 ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle); 1256 mutex_unlock(&adev->pm.mutex); 1257 1258 return ret; 1259 } 1260 1261 int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) 1262 { 1263 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1264 1265 if (is_support_sw_smu(adev)) 1266 return -EOPNOTSUPP; 1267 1268 mutex_lock(&adev->pm.mutex); 1269 if (pp_funcs->set_sclk_od) 1270 pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value); 1271 mutex_unlock(&adev->pm.mutex); 1272 1273 if (amdgpu_dpm_dispatch_task(adev, 1274 AMD_PP_TASK_READJUST_POWER_STATE, 1275 NULL) == -EOPNOTSUPP) { 1276 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1277 amdgpu_dpm_compute_clocks(adev); 1278 } 1279 1280 return 0; 1281 } 1282 1283 int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev) 1284 { 1285 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1286 int ret = 0; 1287 1288 if (!pp_funcs->get_mclk_od) 1289 return -EOPNOTSUPP; 1290 1291 mutex_lock(&adev->pm.mutex); 1292 ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle); 1293 mutex_unlock(&adev->pm.mutex); 1294 1295 return ret; 1296 } 1297 1298 int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) 1299 { 1300 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1301 1302 if (is_support_sw_smu(adev)) 1303 return -EOPNOTSUPP; 1304 1305 mutex_lock(&adev->pm.mutex); 1306 if (pp_funcs->set_mclk_od) 1307 pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value); 1308 mutex_unlock(&adev->pm.mutex); 1309 1310 if (amdgpu_dpm_dispatch_task(adev, 1311 AMD_PP_TASK_READJUST_POWER_STATE, 1312 NULL) == -EOPNOTSUPP) { 1313 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1314 amdgpu_dpm_compute_clocks(adev); 1315 } 1316 1317 return 0; 1318 } 1319 1320 int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev, 1321 char *buf) 1322 { 1323 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1324 int ret = 0; 1325 1326 if (!pp_funcs->get_power_profile_mode) 1327 return -EOPNOTSUPP; 1328 1329 mutex_lock(&adev->pm.mutex); 1330 ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle, 1331 buf); 1332 mutex_unlock(&adev->pm.mutex); 1333 1334 return ret; 1335 } 1336 1337 int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev, 1338 long *input, uint32_t size) 1339 { 1340 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1341 int ret = 0; 1342 1343 if (!pp_funcs->set_power_profile_mode) 1344 return 0; 1345 1346 mutex_lock(&adev->pm.mutex); 1347 ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle, 1348 input, 1349 size); 1350 mutex_unlock(&adev->pm.mutex); 1351 1352 return ret; 1353 } 1354 1355 int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table) 1356 { 1357 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1358 int ret = 0; 1359 1360 if (!pp_funcs->get_gpu_metrics) 1361 return 0; 1362 1363 mutex_lock(&adev->pm.mutex); 1364 ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle, 1365 table); 1366 mutex_unlock(&adev->pm.mutex); 1367 1368 return ret; 1369 } 1370 1371 ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics, 1372 size_t size) 1373 { 1374 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1375 int ret = 0; 1376 1377 if (!pp_funcs->get_pm_metrics) 1378 return -EOPNOTSUPP; 1379 1380 mutex_lock(&adev->pm.mutex); 1381 ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics, 1382 size); 1383 mutex_unlock(&adev->pm.mutex); 1384 1385 return ret; 1386 } 1387 1388 int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev, 1389 uint32_t *fan_mode) 1390 { 1391 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1392 int ret = 0; 1393 1394 if (!pp_funcs->get_fan_control_mode) 1395 return -EOPNOTSUPP; 1396 1397 mutex_lock(&adev->pm.mutex); 1398 ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle, 1399 fan_mode); 1400 mutex_unlock(&adev->pm.mutex); 1401 1402 return ret; 1403 } 1404 1405 int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev, 1406 uint32_t speed) 1407 { 1408 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1409 int ret = 0; 1410 1411 if (!pp_funcs->set_fan_speed_pwm) 1412 return -EOPNOTSUPP; 1413 1414 mutex_lock(&adev->pm.mutex); 1415 ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle, 1416 speed); 1417 mutex_unlock(&adev->pm.mutex); 1418 1419 return ret; 1420 } 1421 1422 int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev, 1423 uint32_t *speed) 1424 { 1425 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1426 int ret = 0; 1427 1428 if (!pp_funcs->get_fan_speed_pwm) 1429 return -EOPNOTSUPP; 1430 1431 mutex_lock(&adev->pm.mutex); 1432 ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle, 1433 speed); 1434 mutex_unlock(&adev->pm.mutex); 1435 1436 return ret; 1437 } 1438 1439 int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev, 1440 uint32_t *speed) 1441 { 1442 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1443 int ret = 0; 1444 1445 if (!pp_funcs->get_fan_speed_rpm) 1446 return -EOPNOTSUPP; 1447 1448 mutex_lock(&adev->pm.mutex); 1449 ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle, 1450 speed); 1451 mutex_unlock(&adev->pm.mutex); 1452 1453 return ret; 1454 } 1455 1456 int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev, 1457 uint32_t speed) 1458 { 1459 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1460 int ret = 0; 1461 1462 if (!pp_funcs->set_fan_speed_rpm) 1463 return -EOPNOTSUPP; 1464 1465 mutex_lock(&adev->pm.mutex); 1466 ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle, 1467 speed); 1468 mutex_unlock(&adev->pm.mutex); 1469 1470 return ret; 1471 } 1472 1473 int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev, 1474 uint32_t mode) 1475 { 1476 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1477 int ret = 0; 1478 1479 if (!pp_funcs->set_fan_control_mode) 1480 return -EOPNOTSUPP; 1481 1482 mutex_lock(&adev->pm.mutex); 1483 ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle, 1484 mode); 1485 mutex_unlock(&adev->pm.mutex); 1486 1487 return ret; 1488 } 1489 1490 int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, 1491 uint32_t *limit, 1492 enum pp_power_limit_level pp_limit_level, 1493 enum pp_power_type power_type) 1494 { 1495 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1496 int ret = 0; 1497 1498 if (!pp_funcs->get_power_limit) 1499 return -ENODATA; 1500 1501 mutex_lock(&adev->pm.mutex); 1502 ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle, 1503 limit, 1504 pp_limit_level, 1505 power_type); 1506 mutex_unlock(&adev->pm.mutex); 1507 1508 return ret; 1509 } 1510 1511 int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, 1512 uint32_t limit) 1513 { 1514 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1515 int ret = 0; 1516 1517 if (!pp_funcs->set_power_limit) 1518 return -EINVAL; 1519 1520 mutex_lock(&adev->pm.mutex); 1521 ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle, 1522 limit); 1523 mutex_unlock(&adev->pm.mutex); 1524 1525 return ret; 1526 } 1527 1528 int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev) 1529 { 1530 bool cclk_dpm_supported = false; 1531 1532 if (!is_support_sw_smu(adev)) 1533 return false; 1534 1535 mutex_lock(&adev->pm.mutex); 1536 cclk_dpm_supported = is_support_cclk_dpm(adev); 1537 mutex_unlock(&adev->pm.mutex); 1538 1539 return (int)cclk_dpm_supported; 1540 } 1541 1542 int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 1543 struct seq_file *m) 1544 { 1545 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1546 1547 if (!pp_funcs->debugfs_print_current_performance_level) 1548 return -EOPNOTSUPP; 1549 1550 mutex_lock(&adev->pm.mutex); 1551 pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle, 1552 m); 1553 mutex_unlock(&adev->pm.mutex); 1554 1555 return 0; 1556 } 1557 1558 int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, 1559 void **addr, 1560 size_t *size) 1561 { 1562 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1563 int ret = 0; 1564 1565 if (!pp_funcs->get_smu_prv_buf_details) 1566 return -ENOSYS; 1567 1568 mutex_lock(&adev->pm.mutex); 1569 ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle, 1570 addr, 1571 size); 1572 mutex_unlock(&adev->pm.mutex); 1573 1574 return ret; 1575 } 1576 1577 int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) 1578 { 1579 if (is_support_sw_smu(adev)) { 1580 struct smu_context *smu = adev->powerplay.pp_handle; 1581 1582 return (smu->od_enabled || smu->is_apu); 1583 } else { 1584 struct pp_hwmgr *hwmgr; 1585 1586 /* 1587 * dpm on some legacy asics don't carry od_enabled member 1588 * as its pp_handle is casted directly from adev. 1589 */ 1590 if (amdgpu_dpm_is_legacy_dpm(adev)) 1591 return false; 1592 1593 hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle; 1594 1595 return hwmgr->od_enabled; 1596 } 1597 } 1598 1599 int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, 1600 const char *buf, 1601 size_t size) 1602 { 1603 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1604 int ret = 0; 1605 1606 if (!pp_funcs->set_pp_table) 1607 return -EOPNOTSUPP; 1608 1609 mutex_lock(&adev->pm.mutex); 1610 ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle, 1611 buf, 1612 size); 1613 mutex_unlock(&adev->pm.mutex); 1614 1615 return ret; 1616 } 1617 1618 int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev) 1619 { 1620 struct smu_context *smu = adev->powerplay.pp_handle; 1621 1622 if (!is_support_sw_smu(adev)) 1623 return INT_MAX; 1624 1625 return smu->cpu_core_num; 1626 } 1627 1628 void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev) 1629 { 1630 if (!is_support_sw_smu(adev)) 1631 return; 1632 1633 amdgpu_smu_stb_debug_fs_init(adev); 1634 } 1635 1636 int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev, 1637 const struct amd_pp_display_configuration *input) 1638 { 1639 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1640 int ret = 0; 1641 1642 if (!pp_funcs->display_configuration_change) 1643 return 0; 1644 1645 mutex_lock(&adev->pm.mutex); 1646 ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle, 1647 input); 1648 mutex_unlock(&adev->pm.mutex); 1649 1650 return ret; 1651 } 1652 1653 int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev, 1654 enum amd_pp_clock_type type, 1655 struct amd_pp_clocks *clocks) 1656 { 1657 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1658 int ret = 0; 1659 1660 if (!pp_funcs->get_clock_by_type) 1661 return 0; 1662 1663 mutex_lock(&adev->pm.mutex); 1664 ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle, 1665 type, 1666 clocks); 1667 mutex_unlock(&adev->pm.mutex); 1668 1669 return ret; 1670 } 1671 1672 int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev, 1673 struct amd_pp_simple_clock_info *clocks) 1674 { 1675 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1676 int ret = 0; 1677 1678 if (!pp_funcs->get_display_mode_validation_clocks) 1679 return 0; 1680 1681 mutex_lock(&adev->pm.mutex); 1682 ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle, 1683 clocks); 1684 mutex_unlock(&adev->pm.mutex); 1685 1686 return ret; 1687 } 1688 1689 int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev, 1690 enum amd_pp_clock_type type, 1691 struct pp_clock_levels_with_latency *clocks) 1692 { 1693 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1694 int ret = 0; 1695 1696 if (!pp_funcs->get_clock_by_type_with_latency) 1697 return 0; 1698 1699 mutex_lock(&adev->pm.mutex); 1700 ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle, 1701 type, 1702 clocks); 1703 mutex_unlock(&adev->pm.mutex); 1704 1705 return ret; 1706 } 1707 1708 int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev, 1709 enum amd_pp_clock_type type, 1710 struct pp_clock_levels_with_voltage *clocks) 1711 { 1712 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1713 int ret = 0; 1714 1715 if (!pp_funcs->get_clock_by_type_with_voltage) 1716 return 0; 1717 1718 mutex_lock(&adev->pm.mutex); 1719 ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle, 1720 type, 1721 clocks); 1722 mutex_unlock(&adev->pm.mutex); 1723 1724 return ret; 1725 } 1726 1727 int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev, 1728 void *clock_ranges) 1729 { 1730 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1731 int ret = 0; 1732 1733 if (!pp_funcs->set_watermarks_for_clocks_ranges) 1734 return -EOPNOTSUPP; 1735 1736 mutex_lock(&adev->pm.mutex); 1737 ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle, 1738 clock_ranges); 1739 mutex_unlock(&adev->pm.mutex); 1740 1741 return ret; 1742 } 1743 1744 int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev, 1745 struct pp_display_clock_request *clock) 1746 { 1747 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1748 int ret = 0; 1749 1750 if (!pp_funcs->display_clock_voltage_request) 1751 return -EOPNOTSUPP; 1752 1753 mutex_lock(&adev->pm.mutex); 1754 ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle, 1755 clock); 1756 mutex_unlock(&adev->pm.mutex); 1757 1758 return ret; 1759 } 1760 1761 int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev, 1762 struct amd_pp_clock_info *clocks) 1763 { 1764 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1765 int ret = 0; 1766 1767 if (!pp_funcs->get_current_clocks) 1768 return -EOPNOTSUPP; 1769 1770 mutex_lock(&adev->pm.mutex); 1771 ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle, 1772 clocks); 1773 mutex_unlock(&adev->pm.mutex); 1774 1775 return ret; 1776 } 1777 1778 void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev) 1779 { 1780 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1781 1782 if (!pp_funcs->notify_smu_enable_pwe) 1783 return; 1784 1785 mutex_lock(&adev->pm.mutex); 1786 pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle); 1787 mutex_unlock(&adev->pm.mutex); 1788 } 1789 1790 int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev, 1791 uint32_t count) 1792 { 1793 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1794 int ret = 0; 1795 1796 if (!pp_funcs->set_active_display_count) 1797 return -EOPNOTSUPP; 1798 1799 mutex_lock(&adev->pm.mutex); 1800 ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle, 1801 count); 1802 mutex_unlock(&adev->pm.mutex); 1803 1804 return ret; 1805 } 1806 1807 int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev, 1808 uint32_t clock) 1809 { 1810 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1811 int ret = 0; 1812 1813 if (!pp_funcs->set_min_deep_sleep_dcefclk) 1814 return -EOPNOTSUPP; 1815 1816 mutex_lock(&adev->pm.mutex); 1817 ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle, 1818 clock); 1819 mutex_unlock(&adev->pm.mutex); 1820 1821 return ret; 1822 } 1823 1824 void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev, 1825 uint32_t clock) 1826 { 1827 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1828 1829 if (!pp_funcs->set_hard_min_dcefclk_by_freq) 1830 return; 1831 1832 mutex_lock(&adev->pm.mutex); 1833 pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle, 1834 clock); 1835 mutex_unlock(&adev->pm.mutex); 1836 } 1837 1838 void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev, 1839 uint32_t clock) 1840 { 1841 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1842 1843 if (!pp_funcs->set_hard_min_fclk_by_freq) 1844 return; 1845 1846 mutex_lock(&adev->pm.mutex); 1847 pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle, 1848 clock); 1849 mutex_unlock(&adev->pm.mutex); 1850 } 1851 1852 int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev, 1853 bool disable_memory_clock_switch) 1854 { 1855 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1856 int ret = 0; 1857 1858 if (!pp_funcs->display_disable_memory_clock_switch) 1859 return 0; 1860 1861 mutex_lock(&adev->pm.mutex); 1862 ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle, 1863 disable_memory_clock_switch); 1864 mutex_unlock(&adev->pm.mutex); 1865 1866 return ret; 1867 } 1868 1869 int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev, 1870 struct pp_smu_nv_clock_table *max_clocks) 1871 { 1872 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1873 int ret = 0; 1874 1875 if (!pp_funcs->get_max_sustainable_clocks_by_dc) 1876 return -EOPNOTSUPP; 1877 1878 mutex_lock(&adev->pm.mutex); 1879 ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle, 1880 max_clocks); 1881 mutex_unlock(&adev->pm.mutex); 1882 1883 return ret; 1884 } 1885 1886 enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev, 1887 unsigned int *clock_values_in_khz, 1888 unsigned int *num_states) 1889 { 1890 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1891 int ret = 0; 1892 1893 if (!pp_funcs->get_uclk_dpm_states) 1894 return -EOPNOTSUPP; 1895 1896 mutex_lock(&adev->pm.mutex); 1897 ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle, 1898 clock_values_in_khz, 1899 num_states); 1900 mutex_unlock(&adev->pm.mutex); 1901 1902 return ret; 1903 } 1904 1905 int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev, 1906 struct dpm_clocks *clock_table) 1907 { 1908 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1909 int ret = 0; 1910 1911 if (!pp_funcs->get_dpm_clock_table) 1912 return -EOPNOTSUPP; 1913 1914 mutex_lock(&adev->pm.mutex); 1915 ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle, 1916 clock_table); 1917 mutex_unlock(&adev->pm.mutex); 1918 1919 return ret; 1920 } 1921