/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
        ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))

int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_sclk)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
                                 low);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_mclk)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
                                 low);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
        int ret = 0;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

        if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
                dev_dbg(adev->dev, "IP block%d already in the target %s state!",
                        block_type, gate ? "gate" : "ungate");
                return 0;
        }

        mutex_lock(&adev->pm.mutex);

        switch (block_type) {
        case AMD_IP_BLOCK_TYPE_UVD:
        case AMD_IP_BLOCK_TYPE_VCE:
        case AMD_IP_BLOCK_TYPE_GFX:
        case AMD_IP_BLOCK_TYPE_VCN:
        case AMD_IP_BLOCK_TYPE_SDMA:
        case AMD_IP_BLOCK_TYPE_JPEG:
        case AMD_IP_BLOCK_TYPE_GMC:
        case AMD_IP_BLOCK_TYPE_ACP:
        case AMD_IP_BLOCK_TYPE_VPE:
                if (pp_funcs && pp_funcs->set_powergating_by_smu)
                        ret = (pp_funcs->set_powergating_by_smu(
                                (adev)->powerplay.pp_handle, block_type, gate));
                break;
        default:
                break;
        }

        if (!ret)
                atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_set_gfx_power_up_by_imu(smu);
        mutex_unlock(&adev->pm.mutex);

        msleep(10);

        return ret;
}

int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;
        int ret = 0;

        if (!pp_funcs || !pp_funcs->set_asic_baco_state)
                return -ENOENT;

        mutex_lock(&adev->pm.mutex);

        /* enter BACO state */
        ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;
        int ret = 0;

        if (!pp_funcs || !pp_funcs->set_asic_baco_state)
                return -ENOENT;

        mutex_lock(&adev->pm.mutex);

        /* exit BACO state */
        ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
                             enum pp_mp1_state mp1_state)
{
        int ret = 0;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (pp_funcs && pp_funcs->set_mp1_state) {
                mutex_lock(&adev->pm.mutex);

                ret = pp_funcs->set_mp1_state(
                                adev->powerplay.pp_handle,
                                mp1_state);

                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
{
        int ret = 0;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (pp_funcs && pp_funcs->notify_rlc_state) {
                mutex_lock(&adev->pm.mutex);

                ret = pp_funcs->notify_rlc_state(
                                adev->powerplay.pp_handle,
                                en);

                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;
        int ret;

        if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
                return 0;
        /* Don't use baco for reset in S3.
         * This is a workaround for some platforms
         * where entering BACO during suspend
         * seems to cause reboots or hangs.
         * This might be related to the fact that BACO controls
         * power to the whole GPU including devices like audio and USB.
         * Powering down/up everything may adversely affect these other
         * devices. Needs more investigation.
         */
        if (adev->in_s3)
                return 0;

        mutex_lock(&adev->pm.mutex);

        ret = pp_funcs->get_asic_baco_capability(pp_handle);

        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;
        int ret = 0;

        if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
                return -ENOENT;

        mutex_lock(&adev->pm.mutex);

        ret = pp_funcs->asic_reset_mode_2(pp_handle);

        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;
        int ret = 0;

        if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
                return -ENOENT;

        mutex_lock(&adev->pm.mutex);

        ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);

        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;
        int ret = 0;

        if (!pp_funcs || !pp_funcs->set_asic_baco_state)
                return -ENOENT;

        mutex_lock(&adev->pm.mutex);

        /* enter BACO state */
        ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
        if (ret)
                goto out;

        /* exit BACO state */
        ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
        mutex_unlock(&adev->pm.mutex);
        return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        bool support_mode1_reset = false;

        if (is_support_sw_smu(adev)) {
                mutex_lock(&adev->pm.mutex);
                support_mode1_reset = smu_mode1_reset_is_support(smu);
                mutex_unlock(&adev->pm.mutex);
        }

        return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = -EOPNOTSUPP;

        if (is_support_sw_smu(adev)) {
                mutex_lock(&adev->pm.mutex);
                ret = smu_mode1_reset(smu);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
                                    enum PP_SMC_POWER_PROFILE type,
                                    bool en)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (amdgpu_sriov_vf(adev))
                return 0;

        if (pp_funcs && pp_funcs->switch_power_profile) {
                mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->switch_power_profile(
                        adev->powerplay.pp_handle, type, en);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
                               uint32_t pstate)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (pp_funcs && pp_funcs->set_xgmi_pstate) {
                mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
                                                pstate);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

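/**
 * amdgpu_dpm_set_df_cstate - set the data fabric (DF) C-state
 * @adev: amdgpu_device pointer
 * @cstate: target DF C-state
 *
 * Forwards the request to the powerplay/SMU backend under the pm mutex.
 *
 * Return: 0 on success or if the callback is not implemented, negative
 * error code on failure.
 */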
int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
                             uint32_t cstate)
{
        int ret = 0;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;

        if (pp_funcs && pp_funcs->set_df_cstate) {
                mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->set_df_cstate(pp_handle, cstate);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
                                      enum pp_pm_policy p_type, char *buf)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = -EOPNOTSUPP;

        if (is_support_sw_smu(adev)) {
                mutex_lock(&adev->pm.mutex);
                ret = smu_get_pm_policy_info(smu, p_type, buf);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
                             int policy_level)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = -EOPNOTSUPP;

        if (is_support_sw_smu(adev)) {
                mutex_lock(&adev->pm.mutex);
                ret = smu_set_pm_policy(smu, policy_type, policy_level);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs =
                adev->powerplay.pp_funcs;
        int ret = 0;

        if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
                mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
                                      uint32_t msg_id)
{
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs =
                adev->powerplay.pp_funcs;
        int ret = 0;

        if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
                mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->set_clockgating_by_smu(pp_handle,
                                                       msg_id);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
                                  bool acquire)
{
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs =
                adev->powerplay.pp_funcs;
        int ret = -EOPNOTSUPP;

        if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
                mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->smu_i2c_bus_access(pp_handle,
                                                   acquire);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
        if (adev->pm.dpm_enabled) {
                mutex_lock(&adev->pm.mutex);
                if (power_supply_is_system_supplied() > 0)
                        adev->pm.ac_power = true;
                else
                        adev->pm.ac_power = false;

                if (adev->powerplay.pp_funcs &&
                    adev->powerplay.pp_funcs->enable_bapm)
                        amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

                if (is_support_sw_smu(adev))
                        smu_set_ac_dc(adev->powerplay.pp_handle);

                mutex_unlock(&adev->pm.mutex);
        }
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
                           void *data, uint32_t *size)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = -EINVAL;

        if (!data || !size)
                return -EINVAL;

        if (pp_funcs && pp_funcs->read_sensor) {
                mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
                                            sensor,
                                            data,
                                            size);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

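/**
 * amdgpu_dpm_get_apu_thermal_limit - query the APU thermal limit
 * @adev: amdgpu_device pointer
 * @limit: where to store the thermal limit reported by the backend
 *
 * Return: 0 on success, -EOPNOTSUPP if the backend does not implement
 * the query, other negative error code on failure.
 */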
int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = -EOPNOTSUPP;

        if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
                mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = -EOPNOTSUPP;

        if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
                mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int i;

        if (!adev->pm.dpm_enabled)
                return;

        if (!pp_funcs->pm_compute_clocks)
                return;

        if (adev->mode_info.num_crtc)
                amdgpu_display_bandwidth_update(adev);

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (ring && ring->sched.ready)
                        amdgpu_fence_wait_empty(ring);
        }

        mutex_lock(&adev->pm.mutex);
        pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
        mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
        int ret = 0;

        if (adev->family == AMDGPU_FAMILY_SI) {
                mutex_lock(&adev->pm.mutex);
                if (enable) {
                        adev->pm.dpm.uvd_active = true;
                        adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
                } else {
                        adev->pm.dpm.uvd_active = false;
                }
                mutex_unlock(&adev->pm.mutex);

                amdgpu_dpm_compute_clocks(adev);
                return;
        }

        ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
        if (ret)
                DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
                          enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
        int ret = 0;

        if (adev->family == AMDGPU_FAMILY_SI) {
                mutex_lock(&adev->pm.mutex);
                if (enable) {
                        adev->pm.dpm.vce_active = true;
                        /* XXX select vce level based on ring/task */
                        adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
                } else {
                        adev->pm.dpm.vce_active = false;
                }
                mutex_unlock(&adev->pm.mutex);

                amdgpu_dpm_compute_clocks(adev);
                return;
        }

        ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
        if (ret)
                DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
                          enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
        int ret = 0;

        ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
        if (ret)
                DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
                          enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
{
        int ret = 0;

        ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable);
        if (ret)
                DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
                          enable ? "enable" : "disable", ret);
}

int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int r = 0;

        if (!pp_funcs || !pp_funcs->load_firmware)
                return 0;

        mutex_lock(&adev->pm.mutex);
        r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
        if (r) {
                pr_err("smu firmware loading failed\n");
                goto out;
        }

        if (smu_version)
                *smu_version = adev->pm.fw_version;

out:
        mutex_unlock(&adev->pm.mutex);
        return r;
}

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
        int ret = 0;

        if (is_support_sw_smu(adev)) {
                mutex_lock(&adev->pm.mutex);
                ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
                                                 enable);
                mutex_unlock(&adev->pm.mutex);
        }

        return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_send_hbm_bad_pages_num(smu, size);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_send_hbm_bad_channel_flag(smu, size);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_send_rma_reason(smu);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
                                  enum pp_clock_type type,
                                  uint32_t *min,
                                  uint32_t *max)
{
        int ret = 0;

        if (type != PP_SCLK)
                return -EINVAL;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
                                     SMU_SCLK,
                                     min,
                                     max);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
                                   enum pp_clock_type type,
                                   uint32_t min,
                                   uint32_t max)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;

        if (type != PP_SCLK)
                return -EINVAL;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_set_soft_freq_range(smu,
                                      SMU_SCLK,
                                      min,
                                      max);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = smu_write_watermarks_table(smu);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
                              enum smu_event_type event,
                              uint64_t event_arg)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_wait_for_event(smu, event, event_arg);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_set_residency_gfxoff(smu, value);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_get_residency_gfxoff(smu, value);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_get_entrycount_gfxoff(smu, value);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_get_status_gfxoff(smu, value);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
        struct smu_context *smu = adev->powerplay.pp_handle;

        if (!is_support_sw_smu(adev))
                return 0;

        return atomic64_read(&smu->throttle_int_counter);
}

/**
 * amdgpu_dpm_gfx_state_change - handle a gfx power state change request
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
                                 enum gfx_change_state state)
{
        mutex_lock(&adev->pm.mutex);
        if (adev->powerplay.pp_funcs &&
            adev->powerplay.pp_funcs->gfx_state_change_set)
                ((adev)->powerplay.pp_funcs->gfx_state_change_set(
                        (adev)->powerplay.pp_handle, state));
        mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
                            void *umc_ecc)
{
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;

        if (!is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = smu_get_ecc_info(smu, umc_ecc);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
                                                     uint32_t idx)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        struct amd_vce_state *vstate = NULL;

        if (!pp_funcs->get_vce_clock_state)
                return NULL;

        mutex_lock(&adev->pm.mutex);
        vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
                                               idx);
        mutex_unlock(&adev->pm.mutex);

        return vstate;
}

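/**
 * amdgpu_dpm_get_current_power_state - get the current power state
 * @adev: amdgpu_device pointer
 * @state: where to store the current amd_pm_state_type
 *
 * Falls back to the cached user state when the backend does not implement
 * the query or reports an out-of-range state.
 */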
void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
                                        enum amd_pm_state_type *state)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        mutex_lock(&adev->pm.mutex);

        if (!pp_funcs->get_current_power_state) {
                *state = adev->pm.dpm.user_state;
                goto out;
        }

        *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
        if (*state < POWER_STATE_TYPE_DEFAULT ||
            *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
                *state = adev->pm.dpm.user_state;

out:
        mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
                                enum amd_pm_state_type state)
{
        mutex_lock(&adev->pm.mutex);
        adev->pm.dpm.user_state = state;
        mutex_unlock(&adev->pm.mutex);

        if (is_support_sw_smu(adev))
                return;

        if (amdgpu_dpm_dispatch_task(adev,
                                     AMD_PP_TASK_ENABLE_USER_STATE,
                                     &state) == -EOPNOTSUPP)
                amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        enum amd_dpm_forced_level level;

        if (!pp_funcs)
                return AMD_DPM_FORCED_LEVEL_AUTO;

        mutex_lock(&adev->pm.mutex);
        if (pp_funcs->get_performance_level)
                level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
        else
                level = adev->pm.dpm.forced_level;
        mutex_unlock(&adev->pm.mutex);

        return level;
}

int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
                                       enum amd_dpm_forced_level level)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        enum amd_dpm_forced_level current_level;
        uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
                                     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

        if (!pp_funcs || !pp_funcs->force_performance_level)
                return 0;

        if (adev->pm.dpm.thermal_active)
                return -EINVAL;

        current_level = amdgpu_dpm_get_performance_level(adev);
        if (current_level == level)
                return 0;

        if (adev->asic_type == CHIP_RAVEN) {
                if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
                        if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
                            level == AMD_DPM_FORCED_LEVEL_MANUAL)
                                amdgpu_gfx_off_ctrl(adev, false);
                        else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
                                 level != AMD_DPM_FORCED_LEVEL_MANUAL)
                                amdgpu_gfx_off_ctrl(adev, true);
                }
        }

        if (!(current_level & profile_mode_mask) &&
            (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
                return -EINVAL;

        if (!(current_level & profile_mode_mask) &&
            (level & profile_mode_mask)) {
                /* enter UMD Pstate */
                amdgpu_device_ip_set_powergating_state(adev,
                                                       AMD_IP_BLOCK_TYPE_GFX,
                                                       AMD_PG_STATE_UNGATE);
                amdgpu_device_ip_set_clockgating_state(adev,
                                                       AMD_IP_BLOCK_TYPE_GFX,
                                                       AMD_CG_STATE_UNGATE);
        } else if ((current_level & profile_mode_mask) &&
                   !(level & profile_mode_mask)) {
                /* exit UMD Pstate */
                amdgpu_device_ip_set_clockgating_state(adev,
                                                       AMD_IP_BLOCK_TYPE_GFX,
                                                       AMD_CG_STATE_GATE);
                amdgpu_device_ip_set_powergating_state(adev,
                                                       AMD_IP_BLOCK_TYPE_GFX,
                                                       AMD_PG_STATE_GATE);
        }

        mutex_lock(&adev->pm.mutex);

        if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
                                              level)) {
                mutex_unlock(&adev->pm.mutex);
                return -EINVAL;
        }

        adev->pm.dpm.forced_level = level;

        mutex_unlock(&adev->pm.mutex);

        return 0;
}

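/**
 * amdgpu_dpm_get_pp_num_states - get the available powerplay states
 * @adev: amdgpu_device pointer
 * @states: where to store the number and type of the states
 *
 * Return: 0 on success, -EOPNOTSUPP if the backend does not implement
 * the query, other negative error code on failure.
 */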
int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
                                 struct pp_states_info *states)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_pp_num_states)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
                                          states);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
                             enum amd_pp_task task_id,
                             enum amd_pm_state_type *user_state)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->dispatch_tasks)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
                                       task_id,
                                       user_state);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_pp_table)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
                                     table);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
                                      uint32_t type,
                                      long *input,
                                      uint32_t size)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_fine_grain_clk_vol)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
                                               type,
                                               input,
                                               size);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
                                  uint32_t type,
                                  long *input,
                                  uint32_t size)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->odn_edit_dpm_table)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
                                           type,
                                           input,
                                           size);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
                                  enum pp_clock_type type,
                                  char *buf)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->print_clock_levels)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
                                           type,
                                           buf);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
                                 enum pp_clock_type type,
                                 char *buf,
                                 int *offset)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->emit_clock_levels)
                return -ENOENT;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
                                          type,
                                          buf,
                                          offset);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

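/**
 * amdgpu_dpm_set_ppfeature_status - enable/disable powerplay features
 * @adev: amdgpu_device pointer
 * @ppfeature_masks: bitmask selecting the powerplay features to enable
 *
 * Return: 0 on success or if the callback is not implemented, negative
 * error code on failure.
 */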
int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
                                    uint64_t ppfeature_masks)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_ppfeature_status)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
                                             ppfeature_masks);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_ppfeature_status)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
                                             buf);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
                                 enum pp_clock_type type,
                                 uint32_t mask)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->force_clock_level)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
                                          type,
                                          mask);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_sclk_od)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        if (pp_funcs->set_sclk_od)
                pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
        mutex_unlock(&adev->pm.mutex);

        if (amdgpu_dpm_dispatch_task(adev,
                                     AMD_PP_TASK_READJUST_POWER_STATE,
                                     NULL) == -EOPNOTSUPP) {
                adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
                amdgpu_dpm_compute_clocks(adev);
        }

        return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_mclk_od)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (is_support_sw_smu(adev))
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        if (pp_funcs->set_mclk_od)
                pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
        mutex_unlock(&adev->pm.mutex);

        if (amdgpu_dpm_dispatch_task(adev,
                                     AMD_PP_TASK_READJUST_POWER_STATE,
                                     NULL) == -EOPNOTSUPP) {
                adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
                amdgpu_dpm_compute_clocks(adev);
        }

        return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
                                      char *buf)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_power_profile_mode)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
                                               buf);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

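/**
 * amdgpu_dpm_set_power_profile_mode - set the power profile mode
 * @adev: amdgpu_device pointer
 * @input: power profile mode parameters
 * @size: number of elements in @input
 *
 * Return: 0 on success or if the callback is not implemented, negative
 * error code on failure.
 */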
int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
                                      long *input, uint32_t size)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_power_profile_mode)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
                                               input,
                                               size);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_gpu_metrics)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
                                        table);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
                                  size_t size)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_pm_metrics)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
                                       size);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
                                    uint32_t *fan_mode)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_fan_control_mode)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
                                             fan_mode);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
                                 uint32_t speed)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_fan_speed_pwm)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
                                          speed);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
                                 uint32_t *speed)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_fan_speed_pwm)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
                                          speed);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
                                 uint32_t *speed)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_fan_speed_rpm)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
                                          speed);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
                                 uint32_t speed)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_fan_speed_rpm)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
                                          speed);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

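/**
 * amdgpu_dpm_set_fan_control_mode - set the fan control mode
 * @adev: amdgpu_device pointer
 * @mode: fan control mode to set
 *
 * Return: 0 on success, -EOPNOTSUPP if the backend does not implement
 * the callback, other negative error code on failure.
 */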
int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
                                    uint32_t mode)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_fan_control_mode)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
                                             mode);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
                               uint32_t *limit,
                               enum pp_power_limit_level pp_limit_level,
                               enum pp_power_type power_type)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_power_limit)
                return -ENODATA;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
                                        limit,
                                        pp_limit_level,
                                        power_type);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
                               uint32_t limit)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_power_limit)
                return -EINVAL;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
                                        limit);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
        bool cclk_dpm_supported = false;

        if (!is_support_sw_smu(adev))
                return false;

        mutex_lock(&adev->pm.mutex);
        cclk_dpm_supported = is_support_cclk_dpm(adev);
        mutex_unlock(&adev->pm.mutex);

        return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
                                                       struct seq_file *m)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (!pp_funcs->debugfs_print_current_performance_level)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
                                                          m);
        mutex_unlock(&adev->pm.mutex);

        return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
                                       void **addr,
                                       size_t *size)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_smu_prv_buf_details)
                return -ENOSYS;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
                                                addr,
                                                size);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
        if (is_support_sw_smu(adev)) {
                struct smu_context *smu = adev->powerplay.pp_handle;

                return (smu->od_enabled || smu->is_apu);
        } else {
                struct pp_hwmgr *hwmgr;

                /*
                 * dpm on some legacy asics don't carry od_enabled member
                 * as its pp_handle is cast directly from adev.
                 */
                if (amdgpu_dpm_is_legacy_dpm(adev))
                        return false;

                hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

                return hwmgr->od_enabled;
        }
}

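/**
 * amdgpu_dpm_set_pp_table - upload a new powerplay table
 * @adev: amdgpu_device pointer
 * @buf: the powerplay table to upload
 * @size: size in bytes of @buf
 *
 * Return: 0 on success, -EOPNOTSUPP if the backend does not implement
 * the callback, other negative error code on failure.
 */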
int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
                            const char *buf,
                            size_t size)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_pp_table)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
                                     buf,
                                     size);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
        struct smu_context *smu = adev->powerplay.pp_handle;

        if (!is_support_sw_smu(adev))
                return INT_MAX;

        return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
        if (!is_support_sw_smu(adev))
                return;

        amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
                                            const struct amd_pp_display_configuration *input)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->display_configuration_change)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
                                                     input);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
                                 enum amd_pp_clock_type type,
                                 struct amd_pp_clocks *clocks)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_clock_by_type)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
                                          type,
                                          clocks);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
                                                struct amd_pp_simple_clock_info *clocks)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_display_mode_validation_clocks)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
                                                           clocks);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
                                              enum amd_pp_clock_type type,
                                              struct pp_clock_levels_with_latency *clocks)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_clock_by_type_with_latency)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
                                                       type,
                                                       clocks);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
                                              enum amd_pp_clock_type type,
                                              struct pp_clock_levels_with_voltage *clocks)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_clock_by_type_with_voltage)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
                                                       type,
                                                       clocks);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

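/**
 * amdgpu_dpm_set_watermarks_for_clocks_ranges - set display watermarks
 * @adev: amdgpu_device pointer
 * @clock_ranges: the clock ranges the watermarks apply to
 *
 * Return: 0 on success, -EOPNOTSUPP if the backend does not implement
 * the callback, other negative error code on failure.
 */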
int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
                                                void *clock_ranges)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_watermarks_for_clocks_ranges)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
                                                         clock_ranges);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
                                             struct pp_display_clock_request *clock)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->display_clock_voltage_request)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
                                                      clock);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
                                  struct amd_pp_clock_info *clocks)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_current_clocks)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
                                           clocks);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (!pp_funcs->notify_smu_enable_pwe)
                return;

        mutex_lock(&adev->pm.mutex);
        pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
        mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
                                        uint32_t count)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_active_display_count)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
                                                 count);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
                                          uint32_t clock)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->set_min_deep_sleep_dcefclk)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
                                                   clock);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
                                             uint32_t clock)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (!pp_funcs->set_hard_min_dcefclk_by_freq)
                return;

        mutex_lock(&adev->pm.mutex);
        pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
                                               clock);
        mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
                                          uint32_t clock)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (!pp_funcs->set_hard_min_fclk_by_freq)
                return;

        mutex_lock(&adev->pm.mutex);
        pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
                                            clock);
        mutex_unlock(&adev->pm.mutex);
}

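/**
 * amdgpu_dpm_display_disable_memory_clock_switch - toggle mclk switching for display
 * @adev: amdgpu_device pointer
 * @disable_memory_clock_switch: true to disable memory clock switching
 *
 * Return: 0 on success or if the callback is not implemented, negative
 * error code on failure.
 */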
int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
                                                   bool disable_memory_clock_switch)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->display_disable_memory_clock_switch)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
                                                            disable_memory_clock_switch);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
                                                struct pp_smu_nv_clock_table *max_clocks)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_max_sustainable_clocks_by_dc)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
                                                         max_clocks);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
                                                  unsigned int *clock_values_in_khz,
                                                  unsigned int *num_states)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_uclk_dpm_states)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
                                            clock_values_in_khz,
                                            num_states);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
                                   struct dpm_clocks *clock_table)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_dpm_clock_table)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
                                            clock_table);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}