1 /* 2 * Copyright 2011 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Alex Deucher 23 */ 24 25 #include "amdgpu.h" 26 #include "amdgpu_atombios.h" 27 #include "amdgpu_i2c.h" 28 #include "amdgpu_dpm.h" 29 #include "atom.h" 30 #include "amd_pcie.h" 31 #include "amdgpu_display.h" 32 #include "hwmgr.h" 33 #include <linux/power_supply.h> 34 #include "amdgpu_smu.h" 35 36 #define amdgpu_dpm_enable_bapm(adev, e) \ 37 ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) 38 39 #define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev)) 40 41 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) 42 { 43 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 44 int ret = 0; 45 46 if (!pp_funcs->get_sclk) 47 return 0; 48 49 mutex_lock(&adev->pm.mutex); 50 ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle, 51 low); 52 mutex_unlock(&adev->pm.mutex); 53 54 return ret; 55 } 56 57 int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low) 58 { 59 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 60 int ret = 0; 61 62 if (!pp_funcs->get_mclk) 63 return 0; 64 65 mutex_lock(&adev->pm.mutex); 66 ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle, 67 low); 68 mutex_unlock(&adev->pm.mutex); 69 70 return ret; 71 } 72 73 int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate) 74 { 75 int ret = 0; 76 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 77 enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON; 78 79 if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) { 80 dev_dbg(adev->dev, "IP block%d already in the target %s state!", 81 block_type, gate ? "gate" : "ungate"); 82 return 0; 83 } 84 85 mutex_lock(&adev->pm.mutex); 86 87 switch (block_type) { 88 case AMD_IP_BLOCK_TYPE_UVD: 89 case AMD_IP_BLOCK_TYPE_VCE: 90 case AMD_IP_BLOCK_TYPE_GFX: 91 case AMD_IP_BLOCK_TYPE_VCN: 92 case AMD_IP_BLOCK_TYPE_SDMA: 93 case AMD_IP_BLOCK_TYPE_JPEG: 94 case AMD_IP_BLOCK_TYPE_GMC: 95 case AMD_IP_BLOCK_TYPE_ACP: 96 case AMD_IP_BLOCK_TYPE_VPE: 97 if (pp_funcs && pp_funcs->set_powergating_by_smu) 98 ret = (pp_funcs->set_powergating_by_smu( 99 (adev)->powerplay.pp_handle, block_type, gate)); 100 break; 101 default: 102 break; 103 } 104 105 if (!ret) 106 atomic_set(&adev->pm.pwr_state[block_type], pwr_state); 107 108 mutex_unlock(&adev->pm.mutex); 109 110 return ret; 111 } 112 113 int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev) 114 { 115 struct smu_context *smu = adev->powerplay.pp_handle; 116 int ret = -EOPNOTSUPP; 117 118 mutex_lock(&adev->pm.mutex); 119 ret = smu_set_gfx_power_up_by_imu(smu); 120 mutex_unlock(&adev->pm.mutex); 121 122 msleep(10); 123 124 return ret; 125 } 126 127 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev) 128 { 129 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 130 void *pp_handle = adev->powerplay.pp_handle; 131 int ret = 0; 132 133 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 134 return -ENOENT; 135 136 mutex_lock(&adev->pm.mutex); 137 138 /* enter BACO state */ 139 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 140 141 mutex_unlock(&adev->pm.mutex); 142 143 return ret; 144 } 145 146 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev) 147 { 148 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 149 void *pp_handle = adev->powerplay.pp_handle; 150 int ret = 0; 151 152 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 153 return -ENOENT; 154 155 mutex_lock(&adev->pm.mutex); 156 157 /* exit BACO state */ 158 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 159 160 mutex_unlock(&adev->pm.mutex); 161 162 return ret; 163 } 164 165 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, 166 enum pp_mp1_state mp1_state) 167 { 168 int ret = 0; 169 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 170 171 if (pp_funcs && pp_funcs->set_mp1_state) { 172 mutex_lock(&adev->pm.mutex); 173 174 ret = pp_funcs->set_mp1_state( 175 adev->powerplay.pp_handle, 176 mp1_state); 177 178 mutex_unlock(&adev->pm.mutex); 179 } 180 181 return ret; 182 } 183 184 int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en) 185 { 186 int ret = 0; 187 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 188 189 if (pp_funcs && pp_funcs->notify_rlc_state) { 190 mutex_lock(&adev->pm.mutex); 191 192 ret = pp_funcs->notify_rlc_state( 193 adev->powerplay.pp_handle, 194 en); 195 196 mutex_unlock(&adev->pm.mutex); 197 } 198 199 return ret; 200 } 201 202 int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) 203 { 204 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 205 void *pp_handle = adev->powerplay.pp_handle; 206 int ret; 207 208 if (!pp_funcs || !pp_funcs->get_asic_baco_capability) 209 return 0; 210 /* Don't use baco for reset in S3. 211 * This is a workaround for some platforms 212 * where entering BACO during suspend 213 * seems to cause reboots or hangs. 214 * This might be related to the fact that BACO controls 215 * power to the whole GPU including devices like audio and USB. 216 * Powering down/up everything may adversely affect these other 217 * devices. Needs more investigation. 218 */ 219 if (adev->in_s3) 220 return 0; 221 222 mutex_lock(&adev->pm.mutex); 223 224 ret = pp_funcs->get_asic_baco_capability(pp_handle); 225 226 mutex_unlock(&adev->pm.mutex); 227 228 return ret; 229 } 230 231 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev) 232 { 233 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 234 void *pp_handle = adev->powerplay.pp_handle; 235 int ret = 0; 236 237 if (!pp_funcs || !pp_funcs->asic_reset_mode_2) 238 return -ENOENT; 239 240 mutex_lock(&adev->pm.mutex); 241 242 ret = pp_funcs->asic_reset_mode_2(pp_handle); 243 244 mutex_unlock(&adev->pm.mutex); 245 246 return ret; 247 } 248 249 int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev) 250 { 251 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 252 void *pp_handle = adev->powerplay.pp_handle; 253 int ret = 0; 254 255 if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features) 256 return -ENOENT; 257 258 mutex_lock(&adev->pm.mutex); 259 260 ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle); 261 262 mutex_unlock(&adev->pm.mutex); 263 264 return ret; 265 } 266 267 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev) 268 { 269 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 270 void *pp_handle = adev->powerplay.pp_handle; 271 int ret = 0; 272 273 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 274 return -ENOENT; 275 276 mutex_lock(&adev->pm.mutex); 277 278 /* enter BACO state */ 279 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 280 if (ret) 281 goto out; 282 283 /* exit BACO state */ 284 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 285 286 out: 287 mutex_unlock(&adev->pm.mutex); 288 return ret; 289 } 290 291 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev) 292 { 293 struct smu_context *smu = adev->powerplay.pp_handle; 294 bool support_mode1_reset = false; 295 296 if (is_support_sw_smu(adev)) { 297 mutex_lock(&adev->pm.mutex); 298 support_mode1_reset = smu_mode1_reset_is_support(smu); 299 mutex_unlock(&adev->pm.mutex); 300 } 301 302 return support_mode1_reset; 303 } 304 305 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev) 306 { 307 struct smu_context *smu = adev->powerplay.pp_handle; 308 int ret = -EOPNOTSUPP; 309 310 if (is_support_sw_smu(adev)) { 311 mutex_lock(&adev->pm.mutex); 312 ret = smu_mode1_reset(smu); 313 mutex_unlock(&adev->pm.mutex); 314 } 315 316 return ret; 317 } 318 319 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, 320 enum PP_SMC_POWER_PROFILE type, 321 bool en) 322 { 323 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 324 int ret = 0; 325 326 if (amdgpu_sriov_vf(adev)) 327 return 0; 328 329 if (pp_funcs && pp_funcs->switch_power_profile) { 330 mutex_lock(&adev->pm.mutex); 331 ret = pp_funcs->switch_power_profile( 332 adev->powerplay.pp_handle, type, en); 333 mutex_unlock(&adev->pm.mutex); 334 } 335 336 return ret; 337 } 338 339 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, 340 uint32_t pstate) 341 { 342 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 343 int ret = 0; 344 345 if (pp_funcs && pp_funcs->set_xgmi_pstate) { 346 mutex_lock(&adev->pm.mutex); 347 ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle, 348 pstate); 349 mutex_unlock(&adev->pm.mutex); 350 } 351 352 return ret; 353 } 354 355 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev, 356 uint32_t cstate) 357 { 358 int ret = 0; 359 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 360 void *pp_handle = adev->powerplay.pp_handle; 361 362 if (pp_funcs && pp_funcs->set_df_cstate) { 363 mutex_lock(&adev->pm.mutex); 364 ret = pp_funcs->set_df_cstate(pp_handle, cstate); 365 mutex_unlock(&adev->pm.mutex); 366 } 367 368 return ret; 369 } 370 371 ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev, 372 enum pp_pm_policy p_type, char *buf) 373 { 374 struct smu_context *smu = adev->powerplay.pp_handle; 375 int ret = -EOPNOTSUPP; 376 377 if (is_support_sw_smu(adev)) { 378 mutex_lock(&adev->pm.mutex); 379 ret = smu_get_pm_policy_info(smu, p_type, buf); 380 mutex_unlock(&adev->pm.mutex); 381 } 382 383 return ret; 384 } 385 386 int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type, 387 int policy_level) 388 { 389 struct smu_context *smu = adev->powerplay.pp_handle; 390 int ret = -EOPNOTSUPP; 391 392 if (is_support_sw_smu(adev)) { 393 mutex_lock(&adev->pm.mutex); 394 ret = smu_set_pm_policy(smu, policy_type, policy_level); 395 mutex_unlock(&adev->pm.mutex); 396 } 397 398 return ret; 399 } 400 401 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev) 402 { 403 void *pp_handle = adev->powerplay.pp_handle; 404 const struct amd_pm_funcs *pp_funcs = 405 adev->powerplay.pp_funcs; 406 int ret = 0; 407 408 if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) { 409 mutex_lock(&adev->pm.mutex); 410 ret = pp_funcs->enable_mgpu_fan_boost(pp_handle); 411 mutex_unlock(&adev->pm.mutex); 412 } 413 414 return ret; 415 } 416 417 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev, 418 uint32_t msg_id) 419 { 420 void *pp_handle = adev->powerplay.pp_handle; 421 const struct amd_pm_funcs *pp_funcs = 422 adev->powerplay.pp_funcs; 423 int ret = 0; 424 425 if (pp_funcs && pp_funcs->set_clockgating_by_smu) { 426 mutex_lock(&adev->pm.mutex); 427 ret = pp_funcs->set_clockgating_by_smu(pp_handle, 428 msg_id); 429 mutex_unlock(&adev->pm.mutex); 430 } 431 432 return ret; 433 } 434 435 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev, 436 bool acquire) 437 { 438 void *pp_handle = adev->powerplay.pp_handle; 439 const struct amd_pm_funcs *pp_funcs = 440 adev->powerplay.pp_funcs; 441 int ret = -EOPNOTSUPP; 442 443 if (pp_funcs && pp_funcs->smu_i2c_bus_access) { 444 mutex_lock(&adev->pm.mutex); 445 ret = pp_funcs->smu_i2c_bus_access(pp_handle, 446 acquire); 447 mutex_unlock(&adev->pm.mutex); 448 } 449 450 return ret; 451 } 452 453 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 454 { 455 if (adev->pm.dpm_enabled) { 456 mutex_lock(&adev->pm.mutex); 457 if (power_supply_is_system_supplied() > 0) 458 adev->pm.ac_power = true; 459 else 460 adev->pm.ac_power = false; 461 462 if (adev->powerplay.pp_funcs && 463 adev->powerplay.pp_funcs->enable_bapm) 464 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); 465 466 if (is_support_sw_smu(adev)) 467 smu_set_ac_dc(adev->powerplay.pp_handle); 468 469 mutex_unlock(&adev->pm.mutex); 470 } 471 } 472 473 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, 474 void *data, uint32_t *size) 475 { 476 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 477 int ret = -EINVAL; 478 479 if (!data || !size) 480 return -EINVAL; 481 482 if (pp_funcs && pp_funcs->read_sensor) { 483 mutex_lock(&adev->pm.mutex); 484 ret = pp_funcs->read_sensor(adev->powerplay.pp_handle, 485 sensor, 486 data, 487 size); 488 mutex_unlock(&adev->pm.mutex); 489 } 490 491 return ret; 492 } 493 494 int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit) 495 { 496 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 497 int ret = -EOPNOTSUPP; 498 499 if (pp_funcs && pp_funcs->get_apu_thermal_limit) { 500 mutex_lock(&adev->pm.mutex); 501 ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit); 502 mutex_unlock(&adev->pm.mutex); 503 } 504 505 return ret; 506 } 507 508 int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit) 509 { 510 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 511 int ret = -EOPNOTSUPP; 512 513 if (pp_funcs && pp_funcs->set_apu_thermal_limit) { 514 mutex_lock(&adev->pm.mutex); 515 ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit); 516 mutex_unlock(&adev->pm.mutex); 517 } 518 519 return ret; 520 } 521 522 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev) 523 { 524 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 525 int i; 526 527 if (!adev->pm.dpm_enabled) 528 return; 529 530 if (!pp_funcs->pm_compute_clocks) 531 return; 532 533 if (adev->mode_info.num_crtc) 534 amdgpu_display_bandwidth_update(adev); 535 536 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 537 struct amdgpu_ring *ring = adev->rings[i]; 538 if (ring && ring->sched.ready) 539 amdgpu_fence_wait_empty(ring); 540 } 541 542 mutex_lock(&adev->pm.mutex); 543 pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle); 544 mutex_unlock(&adev->pm.mutex); 545 } 546 547 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 548 { 549 int ret = 0; 550 551 if (adev->family == AMDGPU_FAMILY_SI) { 552 mutex_lock(&adev->pm.mutex); 553 if (enable) { 554 adev->pm.dpm.uvd_active = true; 555 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; 556 } else { 557 adev->pm.dpm.uvd_active = false; 558 } 559 mutex_unlock(&adev->pm.mutex); 560 561 amdgpu_dpm_compute_clocks(adev); 562 return; 563 } 564 565 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); 566 if (ret) 567 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 568 enable ? "enable" : "disable", ret); 569 } 570 571 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 572 { 573 int ret = 0; 574 575 if (adev->family == AMDGPU_FAMILY_SI) { 576 mutex_lock(&adev->pm.mutex); 577 if (enable) { 578 adev->pm.dpm.vce_active = true; 579 /* XXX select vce level based on ring/task */ 580 adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; 581 } else { 582 adev->pm.dpm.vce_active = false; 583 } 584 mutex_unlock(&adev->pm.mutex); 585 586 amdgpu_dpm_compute_clocks(adev); 587 return; 588 } 589 590 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); 591 if (ret) 592 DRM_ERROR("Dpm %s vce failed, ret = %d. \n", 593 enable ? "enable" : "disable", ret); 594 } 595 596 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) 597 { 598 int ret = 0; 599 600 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable); 601 if (ret) 602 DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", 603 enable ? "enable" : "disable", ret); 604 } 605 606 void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable) 607 { 608 int ret = 0; 609 610 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable); 611 if (ret) 612 DRM_ERROR("Dpm %s vpe failed, ret = %d.\n", 613 enable ? "enable" : "disable", ret); 614 } 615 616 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) 617 { 618 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 619 int r = 0; 620 621 if (!pp_funcs || !pp_funcs->load_firmware || 622 (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU))) 623 return 0; 624 625 mutex_lock(&adev->pm.mutex); 626 r = pp_funcs->load_firmware(adev->powerplay.pp_handle); 627 if (r) { 628 pr_err("smu firmware loading failed\n"); 629 goto out; 630 } 631 632 if (smu_version) 633 *smu_version = adev->pm.fw_version; 634 635 out: 636 mutex_unlock(&adev->pm.mutex); 637 return r; 638 } 639 640 int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable) 641 { 642 int ret = 0; 643 644 if (is_support_sw_smu(adev)) { 645 mutex_lock(&adev->pm.mutex); 646 ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle, 647 enable); 648 mutex_unlock(&adev->pm.mutex); 649 } 650 651 return ret; 652 } 653 654 int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size) 655 { 656 struct smu_context *smu = adev->powerplay.pp_handle; 657 int ret = 0; 658 659 if (!is_support_sw_smu(adev)) 660 return -EOPNOTSUPP; 661 662 mutex_lock(&adev->pm.mutex); 663 ret = smu_send_hbm_bad_pages_num(smu, size); 664 mutex_unlock(&adev->pm.mutex); 665 666 return ret; 667 } 668 669 int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size) 670 { 671 struct smu_context *smu = adev->powerplay.pp_handle; 672 int ret = 0; 673 674 if (!is_support_sw_smu(adev)) 675 return -EOPNOTSUPP; 676 677 mutex_lock(&adev->pm.mutex); 678 ret = smu_send_hbm_bad_channel_flag(smu, size); 679 mutex_unlock(&adev->pm.mutex); 680 681 return ret; 682 } 683 684 int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev) 685 { 686 struct smu_context *smu = adev->powerplay.pp_handle; 687 int ret; 688 689 if (!is_support_sw_smu(adev)) 690 return -EOPNOTSUPP; 691 692 mutex_lock(&adev->pm.mutex); 693 ret = smu_send_rma_reason(smu); 694 mutex_unlock(&adev->pm.mutex); 695 696 return ret; 697 } 698 699 int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, 700 enum pp_clock_type type, 701 uint32_t *min, 702 uint32_t *max) 703 { 704 int ret = 0; 705 706 if (type != PP_SCLK) 707 return -EINVAL; 708 709 if (!is_support_sw_smu(adev)) 710 return -EOPNOTSUPP; 711 712 mutex_lock(&adev->pm.mutex); 713 ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle, 714 SMU_SCLK, 715 min, 716 max); 717 mutex_unlock(&adev->pm.mutex); 718 719 return ret; 720 } 721 722 int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, 723 enum pp_clock_type type, 724 uint32_t min, 725 uint32_t max) 726 { 727 struct smu_context *smu = adev->powerplay.pp_handle; 728 int ret = 0; 729 730 if (type != PP_SCLK) 731 return -EINVAL; 732 733 if (!is_support_sw_smu(adev)) 734 return -EOPNOTSUPP; 735 736 mutex_lock(&adev->pm.mutex); 737 ret = smu_set_soft_freq_range(smu, 738 SMU_SCLK, 739 min, 740 max); 741 mutex_unlock(&adev->pm.mutex); 742 743 return ret; 744 } 745 746 int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev) 747 { 748 struct smu_context *smu = adev->powerplay.pp_handle; 749 int ret = 0; 750 751 if (!is_support_sw_smu(adev)) 752 return 0; 753 754 mutex_lock(&adev->pm.mutex); 755 ret = smu_write_watermarks_table(smu); 756 mutex_unlock(&adev->pm.mutex); 757 758 return ret; 759 } 760 761 int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, 762 enum smu_event_type event, 763 uint64_t event_arg) 764 { 765 struct smu_context *smu = adev->powerplay.pp_handle; 766 int ret = 0; 767 768 if (!is_support_sw_smu(adev)) 769 return -EOPNOTSUPP; 770 771 mutex_lock(&adev->pm.mutex); 772 ret = smu_wait_for_event(smu, event, event_arg); 773 mutex_unlock(&adev->pm.mutex); 774 775 return ret; 776 } 777 778 int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value) 779 { 780 struct smu_context *smu = adev->powerplay.pp_handle; 781 int ret = 0; 782 783 if (!is_support_sw_smu(adev)) 784 return -EOPNOTSUPP; 785 786 mutex_lock(&adev->pm.mutex); 787 ret = smu_set_residency_gfxoff(smu, value); 788 mutex_unlock(&adev->pm.mutex); 789 790 return ret; 791 } 792 793 int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value) 794 { 795 struct smu_context *smu = adev->powerplay.pp_handle; 796 int ret = 0; 797 798 if (!is_support_sw_smu(adev)) 799 return -EOPNOTSUPP; 800 801 mutex_lock(&adev->pm.mutex); 802 ret = smu_get_residency_gfxoff(smu, value); 803 mutex_unlock(&adev->pm.mutex); 804 805 return ret; 806 } 807 808 int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value) 809 { 810 struct smu_context *smu = adev->powerplay.pp_handle; 811 int ret = 0; 812 813 if (!is_support_sw_smu(adev)) 814 return -EOPNOTSUPP; 815 816 mutex_lock(&adev->pm.mutex); 817 ret = smu_get_entrycount_gfxoff(smu, value); 818 mutex_unlock(&adev->pm.mutex); 819 820 return ret; 821 } 822 823 int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value) 824 { 825 struct smu_context *smu = adev->powerplay.pp_handle; 826 int ret = 0; 827 828 if (!is_support_sw_smu(adev)) 829 return -EOPNOTSUPP; 830 831 mutex_lock(&adev->pm.mutex); 832 ret = smu_get_status_gfxoff(smu, value); 833 mutex_unlock(&adev->pm.mutex); 834 835 return ret; 836 } 837 838 uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev) 839 { 840 struct smu_context *smu = adev->powerplay.pp_handle; 841 842 if (!is_support_sw_smu(adev)) 843 return 0; 844 845 return atomic64_read(&smu->throttle_int_counter); 846 } 847 848 /* amdgpu_dpm_gfx_state_change - Handle gfx power state change set 849 * @adev: amdgpu_device pointer 850 * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry) 851 * 852 */ 853 void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev, 854 enum gfx_change_state state) 855 { 856 mutex_lock(&adev->pm.mutex); 857 if (adev->powerplay.pp_funcs && 858 adev->powerplay.pp_funcs->gfx_state_change_set) 859 ((adev)->powerplay.pp_funcs->gfx_state_change_set( 860 (adev)->powerplay.pp_handle, state)); 861 mutex_unlock(&adev->pm.mutex); 862 } 863 864 int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, 865 void *umc_ecc) 866 { 867 struct smu_context *smu = adev->powerplay.pp_handle; 868 int ret = 0; 869 870 if (!is_support_sw_smu(adev)) 871 return -EOPNOTSUPP; 872 873 mutex_lock(&adev->pm.mutex); 874 ret = smu_get_ecc_info(smu, umc_ecc); 875 mutex_unlock(&adev->pm.mutex); 876 877 return ret; 878 } 879 880 struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev, 881 uint32_t idx) 882 { 883 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 884 struct amd_vce_state *vstate = NULL; 885 886 if (!pp_funcs->get_vce_clock_state) 887 return NULL; 888 889 mutex_lock(&adev->pm.mutex); 890 vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle, 891 idx); 892 mutex_unlock(&adev->pm.mutex); 893 894 return vstate; 895 } 896 897 void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, 898 enum amd_pm_state_type *state) 899 { 900 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 901 902 mutex_lock(&adev->pm.mutex); 903 904 if (!pp_funcs->get_current_power_state) { 905 *state = adev->pm.dpm.user_state; 906 goto out; 907 } 908 909 *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle); 910 if (*state < POWER_STATE_TYPE_DEFAULT || 911 *state > POWER_STATE_TYPE_INTERNAL_3DPERF) 912 *state = adev->pm.dpm.user_state; 913 914 out: 915 mutex_unlock(&adev->pm.mutex); 916 } 917 918 void amdgpu_dpm_set_power_state(struct amdgpu_device *adev, 919 enum amd_pm_state_type state) 920 { 921 mutex_lock(&adev->pm.mutex); 922 adev->pm.dpm.user_state = state; 923 mutex_unlock(&adev->pm.mutex); 924 925 if (is_support_sw_smu(adev)) 926 return; 927 928 if (amdgpu_dpm_dispatch_task(adev, 929 AMD_PP_TASK_ENABLE_USER_STATE, 930 &state) == -EOPNOTSUPP) 931 amdgpu_dpm_compute_clocks(adev); 932 } 933 934 enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev) 935 { 936 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 937 enum amd_dpm_forced_level level; 938 939 if (!pp_funcs) 940 return AMD_DPM_FORCED_LEVEL_AUTO; 941 942 mutex_lock(&adev->pm.mutex); 943 if (pp_funcs->get_performance_level) 944 level = pp_funcs->get_performance_level(adev->powerplay.pp_handle); 945 else 946 level = adev->pm.dpm.forced_level; 947 mutex_unlock(&adev->pm.mutex); 948 949 return level; 950 } 951 952 int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev, 953 enum amd_dpm_forced_level level) 954 { 955 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 956 enum amd_dpm_forced_level current_level; 957 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 958 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 959 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 960 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 961 962 if (!pp_funcs || !pp_funcs->force_performance_level) 963 return 0; 964 965 if (adev->pm.dpm.thermal_active) 966 return -EINVAL; 967 968 current_level = amdgpu_dpm_get_performance_level(adev); 969 if (current_level == level) 970 return 0; 971 972 if (adev->asic_type == CHIP_RAVEN) { 973 if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) { 974 if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && 975 level == AMD_DPM_FORCED_LEVEL_MANUAL) 976 amdgpu_gfx_off_ctrl(adev, false); 977 else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && 978 level != AMD_DPM_FORCED_LEVEL_MANUAL) 979 amdgpu_gfx_off_ctrl(adev, true); 980 } 981 } 982 983 if (!(current_level & profile_mode_mask) && 984 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) 985 return -EINVAL; 986 987 if (!(current_level & profile_mode_mask) && 988 (level & profile_mode_mask)) { 989 /* enter UMD Pstate */ 990 amdgpu_device_ip_set_powergating_state(adev, 991 AMD_IP_BLOCK_TYPE_GFX, 992 AMD_PG_STATE_UNGATE); 993 amdgpu_device_ip_set_clockgating_state(adev, 994 AMD_IP_BLOCK_TYPE_GFX, 995 AMD_CG_STATE_UNGATE); 996 } else if ((current_level & profile_mode_mask) && 997 !(level & profile_mode_mask)) { 998 /* exit UMD Pstate */ 999 amdgpu_device_ip_set_clockgating_state(adev, 1000 AMD_IP_BLOCK_TYPE_GFX, 1001 AMD_CG_STATE_GATE); 1002 amdgpu_device_ip_set_powergating_state(adev, 1003 AMD_IP_BLOCK_TYPE_GFX, 1004 AMD_PG_STATE_GATE); 1005 } 1006 1007 mutex_lock(&adev->pm.mutex); 1008 1009 if (pp_funcs->force_performance_level(adev->powerplay.pp_handle, 1010 level)) { 1011 mutex_unlock(&adev->pm.mutex); 1012 return -EINVAL; 1013 } 1014 1015 adev->pm.dpm.forced_level = level; 1016 1017 mutex_unlock(&adev->pm.mutex); 1018 1019 return 0; 1020 } 1021 1022 int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev, 1023 struct pp_states_info *states) 1024 { 1025 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1026 int ret = 0; 1027 1028 if (!pp_funcs->get_pp_num_states) 1029 return -EOPNOTSUPP; 1030 1031 mutex_lock(&adev->pm.mutex); 1032 ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle, 1033 states); 1034 mutex_unlock(&adev->pm.mutex); 1035 1036 return ret; 1037 } 1038 1039 int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev, 1040 enum amd_pp_task task_id, 1041 enum amd_pm_state_type *user_state) 1042 { 1043 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1044 int ret = 0; 1045 1046 if (!pp_funcs->dispatch_tasks) 1047 return -EOPNOTSUPP; 1048 1049 mutex_lock(&adev->pm.mutex); 1050 ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle, 1051 task_id, 1052 user_state); 1053 mutex_unlock(&adev->pm.mutex); 1054 1055 return ret; 1056 } 1057 1058 int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table) 1059 { 1060 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1061 int ret = 0; 1062 1063 if (!pp_funcs->get_pp_table) 1064 return 0; 1065 1066 mutex_lock(&adev->pm.mutex); 1067 ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle, 1068 table); 1069 mutex_unlock(&adev->pm.mutex); 1070 1071 return ret; 1072 } 1073 1074 int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev, 1075 uint32_t type, 1076 long *input, 1077 uint32_t size) 1078 { 1079 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1080 int ret = 0; 1081 1082 if (!pp_funcs->set_fine_grain_clk_vol) 1083 return 0; 1084 1085 mutex_lock(&adev->pm.mutex); 1086 ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle, 1087 type, 1088 input, 1089 size); 1090 mutex_unlock(&adev->pm.mutex); 1091 1092 return ret; 1093 } 1094 1095 int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev, 1096 uint32_t type, 1097 long *input, 1098 uint32_t size) 1099 { 1100 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1101 int ret = 0; 1102 1103 if (!pp_funcs->odn_edit_dpm_table) 1104 return 0; 1105 1106 mutex_lock(&adev->pm.mutex); 1107 ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle, 1108 type, 1109 input, 1110 size); 1111 mutex_unlock(&adev->pm.mutex); 1112 1113 return ret; 1114 } 1115 1116 int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev, 1117 enum pp_clock_type type, 1118 char *buf) 1119 { 1120 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1121 int ret = 0; 1122 1123 if (!pp_funcs->print_clock_levels) 1124 return 0; 1125 1126 mutex_lock(&adev->pm.mutex); 1127 ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle, 1128 type, 1129 buf); 1130 mutex_unlock(&adev->pm.mutex); 1131 1132 return ret; 1133 } 1134 1135 int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev, 1136 enum pp_clock_type type, 1137 char *buf, 1138 int *offset) 1139 { 1140 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1141 int ret = 0; 1142 1143 if (!pp_funcs->emit_clock_levels) 1144 return -ENOENT; 1145 1146 mutex_lock(&adev->pm.mutex); 1147 ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle, 1148 type, 1149 buf, 1150 offset); 1151 mutex_unlock(&adev->pm.mutex); 1152 1153 return ret; 1154 } 1155 1156 int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev, 1157 uint64_t ppfeature_masks) 1158 { 1159 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1160 int ret = 0; 1161 1162 if (!pp_funcs->set_ppfeature_status) 1163 return 0; 1164 1165 mutex_lock(&adev->pm.mutex); 1166 ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle, 1167 ppfeature_masks); 1168 mutex_unlock(&adev->pm.mutex); 1169 1170 return ret; 1171 } 1172 1173 int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf) 1174 { 1175 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1176 int ret = 0; 1177 1178 if (!pp_funcs->get_ppfeature_status) 1179 return 0; 1180 1181 mutex_lock(&adev->pm.mutex); 1182 ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle, 1183 buf); 1184 mutex_unlock(&adev->pm.mutex); 1185 1186 return ret; 1187 } 1188 1189 int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev, 1190 enum pp_clock_type type, 1191 uint32_t mask) 1192 { 1193 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1194 int ret = 0; 1195 1196 if (!pp_funcs->force_clock_level) 1197 return 0; 1198 1199 mutex_lock(&adev->pm.mutex); 1200 ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle, 1201 type, 1202 mask); 1203 mutex_unlock(&adev->pm.mutex); 1204 1205 return ret; 1206 } 1207 1208 int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev) 1209 { 1210 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1211 int ret = 0; 1212 1213 if (!pp_funcs->get_sclk_od) 1214 return -EOPNOTSUPP; 1215 1216 mutex_lock(&adev->pm.mutex); 1217 ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle); 1218 mutex_unlock(&adev->pm.mutex); 1219 1220 return ret; 1221 } 1222 1223 int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) 1224 { 1225 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1226 1227 if (is_support_sw_smu(adev)) 1228 return -EOPNOTSUPP; 1229 1230 mutex_lock(&adev->pm.mutex); 1231 if (pp_funcs->set_sclk_od) 1232 pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value); 1233 mutex_unlock(&adev->pm.mutex); 1234 1235 if (amdgpu_dpm_dispatch_task(adev, 1236 AMD_PP_TASK_READJUST_POWER_STATE, 1237 NULL) == -EOPNOTSUPP) { 1238 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1239 amdgpu_dpm_compute_clocks(adev); 1240 } 1241 1242 return 0; 1243 } 1244 1245 int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev) 1246 { 1247 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1248 int ret = 0; 1249 1250 if (!pp_funcs->get_mclk_od) 1251 return -EOPNOTSUPP; 1252 1253 mutex_lock(&adev->pm.mutex); 1254 ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle); 1255 mutex_unlock(&adev->pm.mutex); 1256 1257 return ret; 1258 } 1259 1260 int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) 1261 { 1262 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1263 1264 if (is_support_sw_smu(adev)) 1265 return -EOPNOTSUPP; 1266 1267 mutex_lock(&adev->pm.mutex); 1268 if (pp_funcs->set_mclk_od) 1269 pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value); 1270 mutex_unlock(&adev->pm.mutex); 1271 1272 if (amdgpu_dpm_dispatch_task(adev, 1273 AMD_PP_TASK_READJUST_POWER_STATE, 1274 NULL) == -EOPNOTSUPP) { 1275 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1276 amdgpu_dpm_compute_clocks(adev); 1277 } 1278 1279 return 0; 1280 } 1281 1282 int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev, 1283 char *buf) 1284 { 1285 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1286 int ret = 0; 1287 1288 if (!pp_funcs->get_power_profile_mode) 1289 return -EOPNOTSUPP; 1290 1291 mutex_lock(&adev->pm.mutex); 1292 ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle, 1293 buf); 1294 mutex_unlock(&adev->pm.mutex); 1295 1296 return ret; 1297 } 1298 1299 int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev, 1300 long *input, uint32_t size) 1301 { 1302 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1303 int ret = 0; 1304 1305 if (!pp_funcs->set_power_profile_mode) 1306 return 0; 1307 1308 mutex_lock(&adev->pm.mutex); 1309 ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle, 1310 input, 1311 size); 1312 mutex_unlock(&adev->pm.mutex); 1313 1314 return ret; 1315 } 1316 1317 int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table) 1318 { 1319 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1320 int ret = 0; 1321 1322 if (!pp_funcs->get_gpu_metrics) 1323 return 0; 1324 1325 mutex_lock(&adev->pm.mutex); 1326 ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle, 1327 table); 1328 mutex_unlock(&adev->pm.mutex); 1329 1330 return ret; 1331 } 1332 1333 ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics, 1334 size_t size) 1335 { 1336 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1337 int ret = 0; 1338 1339 if (!pp_funcs->get_pm_metrics) 1340 return -EOPNOTSUPP; 1341 1342 mutex_lock(&adev->pm.mutex); 1343 ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics, 1344 size); 1345 mutex_unlock(&adev->pm.mutex); 1346 1347 return ret; 1348 } 1349 1350 int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev, 1351 uint32_t *fan_mode) 1352 { 1353 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1354 int ret = 0; 1355 1356 if (!pp_funcs->get_fan_control_mode) 1357 return -EOPNOTSUPP; 1358 1359 mutex_lock(&adev->pm.mutex); 1360 ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle, 1361 fan_mode); 1362 mutex_unlock(&adev->pm.mutex); 1363 1364 return ret; 1365 } 1366 1367 int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev, 1368 uint32_t speed) 1369 { 1370 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1371 int ret = 0; 1372 1373 if (!pp_funcs->set_fan_speed_pwm) 1374 return -EOPNOTSUPP; 1375 1376 mutex_lock(&adev->pm.mutex); 1377 ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle, 1378 speed); 1379 mutex_unlock(&adev->pm.mutex); 1380 1381 return ret; 1382 } 1383 1384 int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev, 1385 uint32_t *speed) 1386 { 1387 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1388 int ret = 0; 1389 1390 if (!pp_funcs->get_fan_speed_pwm) 1391 return -EOPNOTSUPP; 1392 1393 mutex_lock(&adev->pm.mutex); 1394 ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle, 1395 speed); 1396 mutex_unlock(&adev->pm.mutex); 1397 1398 return ret; 1399 } 1400 1401 int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev, 1402 uint32_t *speed) 1403 { 1404 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1405 int ret = 0; 1406 1407 if (!pp_funcs->get_fan_speed_rpm) 1408 return -EOPNOTSUPP; 1409 1410 mutex_lock(&adev->pm.mutex); 1411 ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle, 1412 speed); 1413 mutex_unlock(&adev->pm.mutex); 1414 1415 return ret; 1416 } 1417 1418 int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev, 1419 uint32_t speed) 1420 { 1421 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1422 int ret = 0; 1423 1424 if (!pp_funcs->set_fan_speed_rpm) 1425 return -EOPNOTSUPP; 1426 1427 mutex_lock(&adev->pm.mutex); 1428 ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle, 1429 speed); 1430 mutex_unlock(&adev->pm.mutex); 1431 1432 return ret; 1433 } 1434 1435 int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev, 1436 uint32_t mode) 1437 { 1438 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1439 int ret = 0; 1440 1441 if (!pp_funcs->set_fan_control_mode) 1442 return -EOPNOTSUPP; 1443 1444 mutex_lock(&adev->pm.mutex); 1445 ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle, 1446 mode); 1447 mutex_unlock(&adev->pm.mutex); 1448 1449 return ret; 1450 } 1451 1452 int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, 1453 uint32_t *limit, 1454 enum pp_power_limit_level pp_limit_level, 1455 enum pp_power_type power_type) 1456 { 1457 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1458 int ret = 0; 1459 1460 if (!pp_funcs->get_power_limit) 1461 return -ENODATA; 1462 1463 mutex_lock(&adev->pm.mutex); 1464 ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle, 1465 limit, 1466 pp_limit_level, 1467 power_type); 1468 mutex_unlock(&adev->pm.mutex); 1469 1470 return ret; 1471 } 1472 1473 int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, 1474 uint32_t limit) 1475 { 1476 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1477 int ret = 0; 1478 1479 if (!pp_funcs->set_power_limit) 1480 return -EINVAL; 1481 1482 mutex_lock(&adev->pm.mutex); 1483 ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle, 1484 limit); 1485 mutex_unlock(&adev->pm.mutex); 1486 1487 return ret; 1488 } 1489 1490 int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev) 1491 { 1492 bool cclk_dpm_supported = false; 1493 1494 if (!is_support_sw_smu(adev)) 1495 return false; 1496 1497 mutex_lock(&adev->pm.mutex); 1498 cclk_dpm_supported = is_support_cclk_dpm(adev); 1499 mutex_unlock(&adev->pm.mutex); 1500 1501 return (int)cclk_dpm_supported; 1502 } 1503 1504 int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 1505 struct seq_file *m) 1506 { 1507 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1508 1509 if (!pp_funcs->debugfs_print_current_performance_level) 1510 return -EOPNOTSUPP; 1511 1512 mutex_lock(&adev->pm.mutex); 1513 pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle, 1514 m); 1515 mutex_unlock(&adev->pm.mutex); 1516 1517 return 0; 1518 } 1519 1520 int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, 1521 void **addr, 1522 size_t *size) 1523 { 1524 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1525 int ret = 0; 1526 1527 if (!pp_funcs->get_smu_prv_buf_details) 1528 return -ENOSYS; 1529 1530 mutex_lock(&adev->pm.mutex); 1531 ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle, 1532 addr, 1533 size); 1534 mutex_unlock(&adev->pm.mutex); 1535 1536 return ret; 1537 } 1538 1539 int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) 1540 { 1541 if (is_support_sw_smu(adev)) { 1542 struct smu_context *smu = adev->powerplay.pp_handle; 1543 1544 return (smu->od_enabled || smu->is_apu); 1545 } else { 1546 struct pp_hwmgr *hwmgr; 1547 1548 /* 1549 * dpm on some legacy asics don't carry od_enabled member 1550 * as its pp_handle is casted directly from adev. 1551 */ 1552 if (amdgpu_dpm_is_legacy_dpm(adev)) 1553 return false; 1554 1555 hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle; 1556 1557 return hwmgr->od_enabled; 1558 } 1559 } 1560 1561 int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, 1562 const char *buf, 1563 size_t size) 1564 { 1565 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1566 int ret = 0; 1567 1568 if (!pp_funcs->set_pp_table) 1569 return -EOPNOTSUPP; 1570 1571 mutex_lock(&adev->pm.mutex); 1572 ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle, 1573 buf, 1574 size); 1575 mutex_unlock(&adev->pm.mutex); 1576 1577 return ret; 1578 } 1579 1580 int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev) 1581 { 1582 struct smu_context *smu = adev->powerplay.pp_handle; 1583 1584 if (!is_support_sw_smu(adev)) 1585 return INT_MAX; 1586 1587 return smu->cpu_core_num; 1588 } 1589 1590 void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev) 1591 { 1592 if (!is_support_sw_smu(adev)) 1593 return; 1594 1595 amdgpu_smu_stb_debug_fs_init(adev); 1596 } 1597 1598 int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev, 1599 const struct amd_pp_display_configuration *input) 1600 { 1601 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1602 int ret = 0; 1603 1604 if (!pp_funcs->display_configuration_change) 1605 return 0; 1606 1607 mutex_lock(&adev->pm.mutex); 1608 ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle, 1609 input); 1610 mutex_unlock(&adev->pm.mutex); 1611 1612 return ret; 1613 } 1614 1615 int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev, 1616 enum amd_pp_clock_type type, 1617 struct amd_pp_clocks *clocks) 1618 { 1619 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1620 int ret = 0; 1621 1622 if (!pp_funcs->get_clock_by_type) 1623 return 0; 1624 1625 mutex_lock(&adev->pm.mutex); 1626 ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle, 1627 type, 1628 clocks); 1629 mutex_unlock(&adev->pm.mutex); 1630 1631 return ret; 1632 } 1633 1634 int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev, 1635 struct amd_pp_simple_clock_info *clocks) 1636 { 1637 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1638 int ret = 0; 1639 1640 if (!pp_funcs->get_display_mode_validation_clocks) 1641 return 0; 1642 1643 mutex_lock(&adev->pm.mutex); 1644 ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle, 1645 clocks); 1646 mutex_unlock(&adev->pm.mutex); 1647 1648 return ret; 1649 } 1650 1651 int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev, 1652 enum amd_pp_clock_type type, 1653 struct pp_clock_levels_with_latency *clocks) 1654 { 1655 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1656 int ret = 0; 1657 1658 if (!pp_funcs->get_clock_by_type_with_latency) 1659 return 0; 1660 1661 mutex_lock(&adev->pm.mutex); 1662 ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle, 1663 type, 1664 clocks); 1665 mutex_unlock(&adev->pm.mutex); 1666 1667 return ret; 1668 } 1669 1670 int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev, 1671 enum amd_pp_clock_type type, 1672 struct pp_clock_levels_with_voltage *clocks) 1673 { 1674 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1675 int ret = 0; 1676 1677 if (!pp_funcs->get_clock_by_type_with_voltage) 1678 return 0; 1679 1680 mutex_lock(&adev->pm.mutex); 1681 ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle, 1682 type, 1683 clocks); 1684 mutex_unlock(&adev->pm.mutex); 1685 1686 return ret; 1687 } 1688 1689 int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev, 1690 void *clock_ranges) 1691 { 1692 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1693 int ret = 0; 1694 1695 if (!pp_funcs->set_watermarks_for_clocks_ranges) 1696 return -EOPNOTSUPP; 1697 1698 mutex_lock(&adev->pm.mutex); 1699 ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle, 1700 clock_ranges); 1701 mutex_unlock(&adev->pm.mutex); 1702 1703 return ret; 1704 } 1705 1706 int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev, 1707 struct pp_display_clock_request *clock) 1708 { 1709 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1710 int ret = 0; 1711 1712 if (!pp_funcs->display_clock_voltage_request) 1713 return -EOPNOTSUPP; 1714 1715 mutex_lock(&adev->pm.mutex); 1716 ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle, 1717 clock); 1718 mutex_unlock(&adev->pm.mutex); 1719 1720 return ret; 1721 } 1722 1723 int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev, 1724 struct amd_pp_clock_info *clocks) 1725 { 1726 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1727 int ret = 0; 1728 1729 if (!pp_funcs->get_current_clocks) 1730 return -EOPNOTSUPP; 1731 1732 mutex_lock(&adev->pm.mutex); 1733 ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle, 1734 clocks); 1735 mutex_unlock(&adev->pm.mutex); 1736 1737 return ret; 1738 } 1739 1740 void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev) 1741 { 1742 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1743 1744 if (!pp_funcs->notify_smu_enable_pwe) 1745 return; 1746 1747 mutex_lock(&adev->pm.mutex); 1748 pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle); 1749 mutex_unlock(&adev->pm.mutex); 1750 } 1751 1752 int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev, 1753 uint32_t count) 1754 { 1755 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1756 int ret = 0; 1757 1758 if (!pp_funcs->set_active_display_count) 1759 return -EOPNOTSUPP; 1760 1761 mutex_lock(&adev->pm.mutex); 1762 ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle, 1763 count); 1764 mutex_unlock(&adev->pm.mutex); 1765 1766 return ret; 1767 } 1768 1769 int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev, 1770 uint32_t clock) 1771 { 1772 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1773 int ret = 0; 1774 1775 if (!pp_funcs->set_min_deep_sleep_dcefclk) 1776 return -EOPNOTSUPP; 1777 1778 mutex_lock(&adev->pm.mutex); 1779 ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle, 1780 clock); 1781 mutex_unlock(&adev->pm.mutex); 1782 1783 return ret; 1784 } 1785 1786 void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev, 1787 uint32_t clock) 1788 { 1789 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1790 1791 if (!pp_funcs->set_hard_min_dcefclk_by_freq) 1792 return; 1793 1794 mutex_lock(&adev->pm.mutex); 1795 pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle, 1796 clock); 1797 mutex_unlock(&adev->pm.mutex); 1798 } 1799 1800 void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev, 1801 uint32_t clock) 1802 { 1803 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1804 1805 if (!pp_funcs->set_hard_min_fclk_by_freq) 1806 return; 1807 1808 mutex_lock(&adev->pm.mutex); 1809 pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle, 1810 clock); 1811 mutex_unlock(&adev->pm.mutex); 1812 } 1813 1814 int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev, 1815 bool disable_memory_clock_switch) 1816 { 1817 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1818 int ret = 0; 1819 1820 if (!pp_funcs->display_disable_memory_clock_switch) 1821 return 0; 1822 1823 mutex_lock(&adev->pm.mutex); 1824 ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle, 1825 disable_memory_clock_switch); 1826 mutex_unlock(&adev->pm.mutex); 1827 1828 return ret; 1829 } 1830 1831 int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev, 1832 struct pp_smu_nv_clock_table *max_clocks) 1833 { 1834 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1835 int ret = 0; 1836 1837 if (!pp_funcs->get_max_sustainable_clocks_by_dc) 1838 return -EOPNOTSUPP; 1839 1840 mutex_lock(&adev->pm.mutex); 1841 ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle, 1842 max_clocks); 1843 mutex_unlock(&adev->pm.mutex); 1844 1845 return ret; 1846 } 1847 1848 enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev, 1849 unsigned int *clock_values_in_khz, 1850 unsigned int *num_states) 1851 { 1852 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1853 int ret = 0; 1854 1855 if (!pp_funcs->get_uclk_dpm_states) 1856 return -EOPNOTSUPP; 1857 1858 mutex_lock(&adev->pm.mutex); 1859 ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle, 1860 clock_values_in_khz, 1861 num_states); 1862 mutex_unlock(&adev->pm.mutex); 1863 1864 return ret; 1865 } 1866 1867 int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev, 1868 struct dpm_clocks *clock_table) 1869 { 1870 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1871 int ret = 0; 1872 1873 if (!pp_funcs->get_dpm_clock_table) 1874 return -EOPNOTSUPP; 1875 1876 mutex_lock(&adev->pm.mutex); 1877 ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle, 1878 clock_table); 1879 mutex_unlock(&adev->pm.mutex); 1880 1881 return ret; 1882 } 1883