1 /* 2 * Copyright 2011 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Alex Deucher 23 */ 24 25 #include "amdgpu.h" 26 #include "amdgpu_atombios.h" 27 #include "amdgpu_i2c.h" 28 #include "amdgpu_dpm.h" 29 #include "atom.h" 30 #include "amd_pcie.h" 31 #include "amdgpu_display.h" 32 #include "hwmgr.h" 33 #include <linux/power_supply.h> 34 #include "amdgpu_smu.h" 35 36 #define amdgpu_dpm_enable_bapm(adev, e) \ 37 ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) 38 39 #define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev)) 40 41 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) 42 { 43 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 44 int ret = 0; 45 46 if (!pp_funcs->get_sclk) 47 return 0; 48 49 mutex_lock(&adev->pm.mutex); 50 ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle, 51 low); 52 mutex_unlock(&adev->pm.mutex); 53 54 return ret; 55 } 56 57 int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low) 58 { 59 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 60 int ret = 0; 61 62 if (!pp_funcs->get_mclk) 63 return 0; 64 65 mutex_lock(&adev->pm.mutex); 66 ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle, 67 low); 68 mutex_unlock(&adev->pm.mutex); 69 70 return ret; 71 } 72 73 int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, 74 uint32_t block_type, 75 bool gate, 76 int inst) 77 { 78 int ret = 0; 79 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 80 enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON; 81 bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN; 82 83 if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state && 84 (!is_vcn || adev->vcn.num_vcn_inst == 1)) { 85 dev_dbg(adev->dev, "IP block%d already in the target %s state!", 86 block_type, gate ? "gate" : "ungate"); 87 return 0; 88 } 89 90 mutex_lock(&adev->pm.mutex); 91 92 switch (block_type) { 93 case AMD_IP_BLOCK_TYPE_UVD: 94 case AMD_IP_BLOCK_TYPE_VCE: 95 case AMD_IP_BLOCK_TYPE_GFX: 96 case AMD_IP_BLOCK_TYPE_SDMA: 97 case AMD_IP_BLOCK_TYPE_JPEG: 98 case AMD_IP_BLOCK_TYPE_GMC: 99 case AMD_IP_BLOCK_TYPE_ACP: 100 case AMD_IP_BLOCK_TYPE_VPE: 101 if (pp_funcs && pp_funcs->set_powergating_by_smu) 102 ret = (pp_funcs->set_powergating_by_smu( 103 (adev)->powerplay.pp_handle, block_type, gate, 0)); 104 break; 105 case AMD_IP_BLOCK_TYPE_VCN: 106 if (pp_funcs && pp_funcs->set_powergating_by_smu) 107 ret = (pp_funcs->set_powergating_by_smu( 108 (adev)->powerplay.pp_handle, block_type, gate, inst)); 109 break; 110 default: 111 break; 112 } 113 114 if (!ret) 115 atomic_set(&adev->pm.pwr_state[block_type], pwr_state); 116 117 mutex_unlock(&adev->pm.mutex); 118 119 return ret; 120 } 121 122 int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev) 123 { 124 struct smu_context *smu = adev->powerplay.pp_handle; 125 int ret = -EOPNOTSUPP; 126 127 mutex_lock(&adev->pm.mutex); 128 ret = smu_set_gfx_power_up_by_imu(smu); 129 mutex_unlock(&adev->pm.mutex); 130 131 msleep(10); 132 133 return ret; 134 } 135 136 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev) 137 { 138 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 139 void *pp_handle = adev->powerplay.pp_handle; 140 int ret = 0; 141 142 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 143 return -ENOENT; 144 145 mutex_lock(&adev->pm.mutex); 146 147 /* enter BACO state */ 148 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 149 150 mutex_unlock(&adev->pm.mutex); 151 152 return ret; 153 } 154 155 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev) 156 { 157 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 158 void *pp_handle = adev->powerplay.pp_handle; 159 int ret = 0; 160 161 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 162 return -ENOENT; 163 164 mutex_lock(&adev->pm.mutex); 165 166 /* exit BACO state */ 167 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 168 169 mutex_unlock(&adev->pm.mutex); 170 171 return ret; 172 } 173 174 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, 175 enum pp_mp1_state mp1_state) 176 { 177 int ret = 0; 178 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 179 180 if (mp1_state == PP_MP1_STATE_FLR) { 181 /* VF lost access to SMU */ 182 if (amdgpu_sriov_vf(adev)) 183 adev->pm.dpm_enabled = false; 184 } else if (pp_funcs && pp_funcs->set_mp1_state) { 185 mutex_lock(&adev->pm.mutex); 186 187 ret = pp_funcs->set_mp1_state( 188 adev->powerplay.pp_handle, 189 mp1_state); 190 191 mutex_unlock(&adev->pm.mutex); 192 } 193 194 return ret; 195 } 196 197 int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en) 198 { 199 int ret = 0; 200 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 201 202 if (pp_funcs && pp_funcs->notify_rlc_state) { 203 mutex_lock(&adev->pm.mutex); 204 205 ret = pp_funcs->notify_rlc_state( 206 adev->powerplay.pp_handle, 207 en); 208 209 mutex_unlock(&adev->pm.mutex); 210 } 211 212 return ret; 213 } 214 215 int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) 216 { 217 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 218 void *pp_handle = adev->powerplay.pp_handle; 219 int ret; 220 221 if (!pp_funcs || !pp_funcs->get_asic_baco_capability) 222 return 0; 223 /* Don't use baco for reset in S3. 224 * This is a workaround for some platforms 225 * where entering BACO during suspend 226 * seems to cause reboots or hangs. 227 * This might be related to the fact that BACO controls 228 * power to the whole GPU including devices like audio and USB. 229 * Powering down/up everything may adversely affect these other 230 * devices. Needs more investigation. 231 */ 232 if (adev->in_s3) 233 return 0; 234 235 mutex_lock(&adev->pm.mutex); 236 237 ret = pp_funcs->get_asic_baco_capability(pp_handle); 238 239 mutex_unlock(&adev->pm.mutex); 240 241 return ret; 242 } 243 244 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev) 245 { 246 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 247 void *pp_handle = adev->powerplay.pp_handle; 248 int ret = 0; 249 250 if (!pp_funcs || !pp_funcs->asic_reset_mode_2) 251 return -ENOENT; 252 253 mutex_lock(&adev->pm.mutex); 254 255 ret = pp_funcs->asic_reset_mode_2(pp_handle); 256 257 mutex_unlock(&adev->pm.mutex); 258 259 return ret; 260 } 261 262 int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev) 263 { 264 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 265 void *pp_handle = adev->powerplay.pp_handle; 266 int ret = 0; 267 268 if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features) 269 return -ENOENT; 270 271 mutex_lock(&adev->pm.mutex); 272 273 ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle); 274 275 mutex_unlock(&adev->pm.mutex); 276 277 return ret; 278 } 279 280 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev) 281 { 282 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 283 void *pp_handle = adev->powerplay.pp_handle; 284 int ret = 0; 285 286 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 287 return -ENOENT; 288 289 mutex_lock(&adev->pm.mutex); 290 291 /* enter BACO state */ 292 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 293 if (ret) 294 goto out; 295 296 /* exit BACO state */ 297 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 298 299 out: 300 mutex_unlock(&adev->pm.mutex); 301 return ret; 302 } 303 304 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev) 305 { 306 struct smu_context *smu = adev->powerplay.pp_handle; 307 bool support_mode1_reset = false; 308 309 if (is_support_sw_smu(adev)) { 310 mutex_lock(&adev->pm.mutex); 311 support_mode1_reset = smu_mode1_reset_is_support(smu); 312 mutex_unlock(&adev->pm.mutex); 313 } 314 315 return support_mode1_reset; 316 } 317 318 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev) 319 { 320 struct smu_context *smu = adev->powerplay.pp_handle; 321 int ret = -EOPNOTSUPP; 322 323 if (is_support_sw_smu(adev)) { 324 mutex_lock(&adev->pm.mutex); 325 ret = smu_mode1_reset(smu); 326 mutex_unlock(&adev->pm.mutex); 327 } 328 329 return ret; 330 } 331 332 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, 333 enum PP_SMC_POWER_PROFILE type, 334 bool en) 335 { 336 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 337 int ret = 0; 338 339 if (amdgpu_sriov_vf(adev)) 340 return 0; 341 342 if (pp_funcs && pp_funcs->switch_power_profile) { 343 mutex_lock(&adev->pm.mutex); 344 ret = pp_funcs->switch_power_profile( 345 adev->powerplay.pp_handle, type, en); 346 mutex_unlock(&adev->pm.mutex); 347 } 348 349 return ret; 350 } 351 352 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, 353 uint32_t pstate) 354 { 355 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 356 int ret = 0; 357 358 if (pp_funcs && pp_funcs->set_xgmi_pstate) { 359 mutex_lock(&adev->pm.mutex); 360 ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle, 361 pstate); 362 mutex_unlock(&adev->pm.mutex); 363 } 364 365 return ret; 366 } 367 368 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev, 369 uint32_t cstate) 370 { 371 int ret = 0; 372 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 373 void *pp_handle = adev->powerplay.pp_handle; 374 375 if (pp_funcs && pp_funcs->set_df_cstate) { 376 mutex_lock(&adev->pm.mutex); 377 ret = pp_funcs->set_df_cstate(pp_handle, cstate); 378 mutex_unlock(&adev->pm.mutex); 379 } 380 381 return ret; 382 } 383 384 ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev, 385 enum pp_pm_policy p_type, char *buf) 386 { 387 struct smu_context *smu = adev->powerplay.pp_handle; 388 int ret = -EOPNOTSUPP; 389 390 if (is_support_sw_smu(adev)) { 391 mutex_lock(&adev->pm.mutex); 392 ret = smu_get_pm_policy_info(smu, p_type, buf); 393 mutex_unlock(&adev->pm.mutex); 394 } 395 396 return ret; 397 } 398 399 int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type, 400 int policy_level) 401 { 402 struct smu_context *smu = adev->powerplay.pp_handle; 403 int ret = -EOPNOTSUPP; 404 405 if (is_support_sw_smu(adev)) { 406 mutex_lock(&adev->pm.mutex); 407 ret = smu_set_pm_policy(smu, policy_type, policy_level); 408 mutex_unlock(&adev->pm.mutex); 409 } 410 411 return ret; 412 } 413 414 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev) 415 { 416 void *pp_handle = adev->powerplay.pp_handle; 417 const struct amd_pm_funcs *pp_funcs = 418 adev->powerplay.pp_funcs; 419 int ret = 0; 420 421 if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) { 422 mutex_lock(&adev->pm.mutex); 423 ret = pp_funcs->enable_mgpu_fan_boost(pp_handle); 424 mutex_unlock(&adev->pm.mutex); 425 } 426 427 return ret; 428 } 429 430 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev, 431 uint32_t msg_id) 432 { 433 void *pp_handle = adev->powerplay.pp_handle; 434 const struct amd_pm_funcs *pp_funcs = 435 adev->powerplay.pp_funcs; 436 int ret = 0; 437 438 if (pp_funcs && pp_funcs->set_clockgating_by_smu) { 439 mutex_lock(&adev->pm.mutex); 440 ret = pp_funcs->set_clockgating_by_smu(pp_handle, 441 msg_id); 442 mutex_unlock(&adev->pm.mutex); 443 } 444 445 return ret; 446 } 447 448 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev, 449 bool acquire) 450 { 451 void *pp_handle = adev->powerplay.pp_handle; 452 const struct amd_pm_funcs *pp_funcs = 453 adev->powerplay.pp_funcs; 454 int ret = -EOPNOTSUPP; 455 456 if (pp_funcs && pp_funcs->smu_i2c_bus_access) { 457 mutex_lock(&adev->pm.mutex); 458 ret = pp_funcs->smu_i2c_bus_access(pp_handle, 459 acquire); 460 mutex_unlock(&adev->pm.mutex); 461 } 462 463 return ret; 464 } 465 466 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 467 { 468 if (adev->pm.dpm_enabled) { 469 mutex_lock(&adev->pm.mutex); 470 if (power_supply_is_system_supplied() > 0) 471 adev->pm.ac_power = true; 472 else 473 adev->pm.ac_power = false; 474 475 if (adev->powerplay.pp_funcs && 476 adev->powerplay.pp_funcs->enable_bapm) 477 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); 478 479 if (is_support_sw_smu(adev)) 480 smu_set_ac_dc(adev->powerplay.pp_handle); 481 482 mutex_unlock(&adev->pm.mutex); 483 } 484 } 485 486 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, 487 void *data, uint32_t *size) 488 { 489 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 490 int ret = -EINVAL; 491 492 if (!data || !size) 493 return -EINVAL; 494 495 if (pp_funcs && pp_funcs->read_sensor) { 496 mutex_lock(&adev->pm.mutex); 497 ret = pp_funcs->read_sensor(adev->powerplay.pp_handle, 498 sensor, 499 data, 500 size); 501 mutex_unlock(&adev->pm.mutex); 502 } 503 504 return ret; 505 } 506 507 int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit) 508 { 509 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 510 int ret = -EOPNOTSUPP; 511 512 if (pp_funcs && pp_funcs->get_apu_thermal_limit) { 513 mutex_lock(&adev->pm.mutex); 514 ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit); 515 mutex_unlock(&adev->pm.mutex); 516 } 517 518 return ret; 519 } 520 521 int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit) 522 { 523 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 524 int ret = -EOPNOTSUPP; 525 526 if (pp_funcs && pp_funcs->set_apu_thermal_limit) { 527 mutex_lock(&adev->pm.mutex); 528 ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit); 529 mutex_unlock(&adev->pm.mutex); 530 } 531 532 return ret; 533 } 534 535 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev) 536 { 537 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 538 int i; 539 540 if (!adev->pm.dpm_enabled) 541 return; 542 543 if (!pp_funcs->pm_compute_clocks) 544 return; 545 546 if (adev->mode_info.num_crtc) 547 amdgpu_display_bandwidth_update(adev); 548 549 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 550 struct amdgpu_ring *ring = adev->rings[i]; 551 if (ring && ring->sched.ready) 552 amdgpu_fence_wait_empty(ring); 553 } 554 555 mutex_lock(&adev->pm.mutex); 556 pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle); 557 mutex_unlock(&adev->pm.mutex); 558 } 559 560 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 561 { 562 int ret = 0; 563 564 if (adev->family == AMDGPU_FAMILY_SI) { 565 mutex_lock(&adev->pm.mutex); 566 if (enable) { 567 adev->pm.dpm.uvd_active = true; 568 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; 569 } else { 570 adev->pm.dpm.uvd_active = false; 571 } 572 mutex_unlock(&adev->pm.mutex); 573 574 amdgpu_dpm_compute_clocks(adev); 575 return; 576 } 577 578 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable, 0); 579 if (ret) 580 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 581 enable ? "enable" : "disable", ret); 582 } 583 584 void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst) 585 { 586 int ret = 0; 587 588 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, !enable, inst); 589 if (ret) 590 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 591 enable ? "enable" : "disable", ret); 592 } 593 594 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 595 { 596 int ret = 0; 597 598 if (adev->family == AMDGPU_FAMILY_SI) { 599 mutex_lock(&adev->pm.mutex); 600 if (enable) { 601 adev->pm.dpm.vce_active = true; 602 /* XXX select vce level based on ring/task */ 603 adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; 604 } else { 605 adev->pm.dpm.vce_active = false; 606 } 607 mutex_unlock(&adev->pm.mutex); 608 609 amdgpu_dpm_compute_clocks(adev); 610 return; 611 } 612 613 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable, 0); 614 if (ret) 615 DRM_ERROR("Dpm %s vce failed, ret = %d. \n", 616 enable ? "enable" : "disable", ret); 617 } 618 619 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) 620 { 621 int ret = 0; 622 623 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable, 0); 624 if (ret) 625 DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", 626 enable ? "enable" : "disable", ret); 627 } 628 629 void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable) 630 { 631 int ret = 0; 632 633 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable, 0); 634 if (ret) 635 DRM_ERROR("Dpm %s vpe failed, ret = %d.\n", 636 enable ? "enable" : "disable", ret); 637 } 638 639 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) 640 { 641 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 642 int r = 0; 643 644 if (!pp_funcs || !pp_funcs->load_firmware || 645 (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU))) 646 return 0; 647 648 mutex_lock(&adev->pm.mutex); 649 r = pp_funcs->load_firmware(adev->powerplay.pp_handle); 650 if (r) { 651 pr_err("smu firmware loading failed\n"); 652 goto out; 653 } 654 655 if (smu_version) 656 *smu_version = adev->pm.fw_version; 657 658 out: 659 mutex_unlock(&adev->pm.mutex); 660 return r; 661 } 662 663 int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable) 664 { 665 int ret = 0; 666 667 if (is_support_sw_smu(adev)) { 668 mutex_lock(&adev->pm.mutex); 669 ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle, 670 enable); 671 mutex_unlock(&adev->pm.mutex); 672 } 673 674 return ret; 675 } 676 677 int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size) 678 { 679 struct smu_context *smu = adev->powerplay.pp_handle; 680 int ret = 0; 681 682 if (!is_support_sw_smu(adev)) 683 return -EOPNOTSUPP; 684 685 mutex_lock(&adev->pm.mutex); 686 ret = smu_send_hbm_bad_pages_num(smu, size); 687 mutex_unlock(&adev->pm.mutex); 688 689 return ret; 690 } 691 692 int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size) 693 { 694 struct smu_context *smu = adev->powerplay.pp_handle; 695 int ret = 0; 696 697 if (!is_support_sw_smu(adev)) 698 return -EOPNOTSUPP; 699 700 mutex_lock(&adev->pm.mutex); 701 ret = smu_send_hbm_bad_channel_flag(smu, size); 702 mutex_unlock(&adev->pm.mutex); 703 704 return ret; 705 } 706 707 int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev) 708 { 709 struct smu_context *smu = adev->powerplay.pp_handle; 710 int ret; 711 712 if (!is_support_sw_smu(adev)) 713 return -EOPNOTSUPP; 714 715 mutex_lock(&adev->pm.mutex); 716 ret = smu_send_rma_reason(smu); 717 mutex_unlock(&adev->pm.mutex); 718 719 if (amdgpu_cper_generate_bp_threshold_record(adev)) 720 dev_warn(adev->dev, "fail to generate bad page threshold cper records\n"); 721 722 return ret; 723 } 724 725 int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask) 726 { 727 struct smu_context *smu = adev->powerplay.pp_handle; 728 int ret; 729 730 if (!is_support_sw_smu(adev)) 731 return -EOPNOTSUPP; 732 733 mutex_lock(&adev->pm.mutex); 734 ret = smu_reset_sdma(smu, inst_mask); 735 mutex_unlock(&adev->pm.mutex); 736 737 return ret; 738 } 739 740 int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, 741 enum pp_clock_type type, 742 uint32_t *min, 743 uint32_t *max) 744 { 745 int ret = 0; 746 747 if (type != PP_SCLK) 748 return -EINVAL; 749 750 if (!is_support_sw_smu(adev)) 751 return -EOPNOTSUPP; 752 753 mutex_lock(&adev->pm.mutex); 754 ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle, 755 SMU_SCLK, 756 min, 757 max); 758 mutex_unlock(&adev->pm.mutex); 759 760 return ret; 761 } 762 763 int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, 764 enum pp_clock_type type, 765 uint32_t min, 766 uint32_t max) 767 { 768 struct smu_context *smu = adev->powerplay.pp_handle; 769 int ret = 0; 770 771 if (type != PP_SCLK) 772 return -EINVAL; 773 774 if (!is_support_sw_smu(adev)) 775 return -EOPNOTSUPP; 776 777 mutex_lock(&adev->pm.mutex); 778 ret = smu_set_soft_freq_range(smu, 779 SMU_SCLK, 780 min, 781 max); 782 mutex_unlock(&adev->pm.mutex); 783 784 return ret; 785 } 786 787 int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev) 788 { 789 struct smu_context *smu = adev->powerplay.pp_handle; 790 int ret = 0; 791 792 if (!is_support_sw_smu(adev)) 793 return 0; 794 795 mutex_lock(&adev->pm.mutex); 796 ret = smu_write_watermarks_table(smu); 797 mutex_unlock(&adev->pm.mutex); 798 799 return ret; 800 } 801 802 int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, 803 enum smu_event_type event, 804 uint64_t event_arg) 805 { 806 struct smu_context *smu = adev->powerplay.pp_handle; 807 int ret = 0; 808 809 if (!is_support_sw_smu(adev)) 810 return -EOPNOTSUPP; 811 812 mutex_lock(&adev->pm.mutex); 813 ret = smu_wait_for_event(smu, event, event_arg); 814 mutex_unlock(&adev->pm.mutex); 815 816 return ret; 817 } 818 819 int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value) 820 { 821 struct smu_context *smu = adev->powerplay.pp_handle; 822 int ret = 0; 823 824 if (!is_support_sw_smu(adev)) 825 return -EOPNOTSUPP; 826 827 mutex_lock(&adev->pm.mutex); 828 ret = smu_set_residency_gfxoff(smu, value); 829 mutex_unlock(&adev->pm.mutex); 830 831 return ret; 832 } 833 834 int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value) 835 { 836 struct smu_context *smu = adev->powerplay.pp_handle; 837 int ret = 0; 838 839 if (!is_support_sw_smu(adev)) 840 return -EOPNOTSUPP; 841 842 mutex_lock(&adev->pm.mutex); 843 ret = smu_get_residency_gfxoff(smu, value); 844 mutex_unlock(&adev->pm.mutex); 845 846 return ret; 847 } 848 849 int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value) 850 { 851 struct smu_context *smu = adev->powerplay.pp_handle; 852 int ret = 0; 853 854 if (!is_support_sw_smu(adev)) 855 return -EOPNOTSUPP; 856 857 mutex_lock(&adev->pm.mutex); 858 ret = smu_get_entrycount_gfxoff(smu, value); 859 mutex_unlock(&adev->pm.mutex); 860 861 return ret; 862 } 863 864 int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value) 865 { 866 struct smu_context *smu = adev->powerplay.pp_handle; 867 int ret = 0; 868 869 if (!is_support_sw_smu(adev)) 870 return -EOPNOTSUPP; 871 872 mutex_lock(&adev->pm.mutex); 873 ret = smu_get_status_gfxoff(smu, value); 874 mutex_unlock(&adev->pm.mutex); 875 876 return ret; 877 } 878 879 uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev) 880 { 881 struct smu_context *smu = adev->powerplay.pp_handle; 882 883 if (!is_support_sw_smu(adev)) 884 return 0; 885 886 return atomic64_read(&smu->throttle_int_counter); 887 } 888 889 /* amdgpu_dpm_gfx_state_change - Handle gfx power state change set 890 * @adev: amdgpu_device pointer 891 * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry) 892 * 893 */ 894 void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev, 895 enum gfx_change_state state) 896 { 897 mutex_lock(&adev->pm.mutex); 898 if (adev->powerplay.pp_funcs && 899 adev->powerplay.pp_funcs->gfx_state_change_set) 900 ((adev)->powerplay.pp_funcs->gfx_state_change_set( 901 (adev)->powerplay.pp_handle, state)); 902 mutex_unlock(&adev->pm.mutex); 903 } 904 905 int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, 906 void *umc_ecc) 907 { 908 struct smu_context *smu = adev->powerplay.pp_handle; 909 int ret = 0; 910 911 if (!is_support_sw_smu(adev)) 912 return -EOPNOTSUPP; 913 914 mutex_lock(&adev->pm.mutex); 915 ret = smu_get_ecc_info(smu, umc_ecc); 916 mutex_unlock(&adev->pm.mutex); 917 918 return ret; 919 } 920 921 struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev, 922 uint32_t idx) 923 { 924 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 925 struct amd_vce_state *vstate = NULL; 926 927 if (!pp_funcs->get_vce_clock_state) 928 return NULL; 929 930 mutex_lock(&adev->pm.mutex); 931 vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle, 932 idx); 933 mutex_unlock(&adev->pm.mutex); 934 935 return vstate; 936 } 937 938 void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, 939 enum amd_pm_state_type *state) 940 { 941 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 942 943 mutex_lock(&adev->pm.mutex); 944 945 if (!pp_funcs->get_current_power_state) { 946 *state = adev->pm.dpm.user_state; 947 goto out; 948 } 949 950 *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle); 951 if (*state < POWER_STATE_TYPE_DEFAULT || 952 *state > POWER_STATE_TYPE_INTERNAL_3DPERF) 953 *state = adev->pm.dpm.user_state; 954 955 out: 956 mutex_unlock(&adev->pm.mutex); 957 } 958 959 void amdgpu_dpm_set_power_state(struct amdgpu_device *adev, 960 enum amd_pm_state_type state) 961 { 962 mutex_lock(&adev->pm.mutex); 963 adev->pm.dpm.user_state = state; 964 mutex_unlock(&adev->pm.mutex); 965 966 if (is_support_sw_smu(adev)) 967 return; 968 969 if (amdgpu_dpm_dispatch_task(adev, 970 AMD_PP_TASK_ENABLE_USER_STATE, 971 &state) == -EOPNOTSUPP) 972 amdgpu_dpm_compute_clocks(adev); 973 } 974 975 enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev) 976 { 977 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 978 enum amd_dpm_forced_level level; 979 980 if (!pp_funcs) 981 return AMD_DPM_FORCED_LEVEL_AUTO; 982 983 mutex_lock(&adev->pm.mutex); 984 if (pp_funcs->get_performance_level) 985 level = pp_funcs->get_performance_level(adev->powerplay.pp_handle); 986 else 987 level = adev->pm.dpm.forced_level; 988 mutex_unlock(&adev->pm.mutex); 989 990 return level; 991 } 992 993 static void amdgpu_dpm_enter_umd_state(struct amdgpu_device *adev) 994 { 995 /* enter UMD Pstate */ 996 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX, 997 AMD_PG_STATE_UNGATE); 998 amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX, 999 AMD_CG_STATE_UNGATE); 1000 } 1001 1002 static void amdgpu_dpm_exit_umd_state(struct amdgpu_device *adev) 1003 { 1004 /* exit UMD Pstate */ 1005 amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX, 1006 AMD_CG_STATE_GATE); 1007 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX, 1008 AMD_PG_STATE_GATE); 1009 } 1010 1011 int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev, 1012 enum amd_dpm_forced_level level) 1013 { 1014 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1015 enum amd_dpm_forced_level current_level; 1016 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 1017 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 1018 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 1019 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 1020 1021 if (!pp_funcs || !pp_funcs->force_performance_level) 1022 return 0; 1023 1024 if (adev->pm.dpm.thermal_active) 1025 return -EINVAL; 1026 1027 current_level = amdgpu_dpm_get_performance_level(adev); 1028 if (current_level == level) 1029 return 0; 1030 1031 if (!(current_level & profile_mode_mask) && 1032 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) 1033 return -EINVAL; 1034 1035 if (adev->asic_type == CHIP_RAVEN) { 1036 if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) { 1037 if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && 1038 level == AMD_DPM_FORCED_LEVEL_MANUAL) 1039 amdgpu_gfx_off_ctrl(adev, false); 1040 else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && 1041 level != AMD_DPM_FORCED_LEVEL_MANUAL) 1042 amdgpu_gfx_off_ctrl(adev, true); 1043 } 1044 } 1045 1046 if (!(current_level & profile_mode_mask) && (level & profile_mode_mask)) 1047 amdgpu_dpm_enter_umd_state(adev); 1048 else if ((current_level & profile_mode_mask) && 1049 !(level & profile_mode_mask)) 1050 amdgpu_dpm_exit_umd_state(adev); 1051 1052 mutex_lock(&adev->pm.mutex); 1053 1054 if (pp_funcs->force_performance_level(adev->powerplay.pp_handle, 1055 level)) { 1056 mutex_unlock(&adev->pm.mutex); 1057 /* If new level failed, retain the umd state as before */ 1058 if (!(current_level & profile_mode_mask) && 1059 (level & profile_mode_mask)) 1060 amdgpu_dpm_exit_umd_state(adev); 1061 else if ((current_level & profile_mode_mask) && 1062 !(level & profile_mode_mask)) 1063 amdgpu_dpm_enter_umd_state(adev); 1064 1065 return -EINVAL; 1066 } 1067 1068 adev->pm.dpm.forced_level = level; 1069 1070 mutex_unlock(&adev->pm.mutex); 1071 1072 return 0; 1073 } 1074 1075 int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev, 1076 struct pp_states_info *states) 1077 { 1078 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1079 int ret = 0; 1080 1081 if (!pp_funcs->get_pp_num_states) 1082 return -EOPNOTSUPP; 1083 1084 mutex_lock(&adev->pm.mutex); 1085 ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle, 1086 states); 1087 mutex_unlock(&adev->pm.mutex); 1088 1089 return ret; 1090 } 1091 1092 int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev, 1093 enum amd_pp_task task_id, 1094 enum amd_pm_state_type *user_state) 1095 { 1096 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1097 int ret = 0; 1098 1099 if (!pp_funcs->dispatch_tasks) 1100 return -EOPNOTSUPP; 1101 1102 mutex_lock(&adev->pm.mutex); 1103 ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle, 1104 task_id, 1105 user_state); 1106 mutex_unlock(&adev->pm.mutex); 1107 1108 return ret; 1109 } 1110 1111 int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table) 1112 { 1113 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1114 int ret = 0; 1115 1116 if (!pp_funcs->get_pp_table) 1117 return 0; 1118 1119 mutex_lock(&adev->pm.mutex); 1120 ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle, 1121 table); 1122 mutex_unlock(&adev->pm.mutex); 1123 1124 return ret; 1125 } 1126 1127 int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev, 1128 uint32_t type, 1129 long *input, 1130 uint32_t size) 1131 { 1132 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1133 int ret = 0; 1134 1135 if (!pp_funcs->set_fine_grain_clk_vol) 1136 return 0; 1137 1138 mutex_lock(&adev->pm.mutex); 1139 ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle, 1140 type, 1141 input, 1142 size); 1143 mutex_unlock(&adev->pm.mutex); 1144 1145 return ret; 1146 } 1147 1148 int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev, 1149 uint32_t type, 1150 long *input, 1151 uint32_t size) 1152 { 1153 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1154 int ret = 0; 1155 1156 if (!pp_funcs->odn_edit_dpm_table) 1157 return 0; 1158 1159 mutex_lock(&adev->pm.mutex); 1160 ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle, 1161 type, 1162 input, 1163 size); 1164 mutex_unlock(&adev->pm.mutex); 1165 1166 return ret; 1167 } 1168 1169 int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev, 1170 enum pp_clock_type type, 1171 char *buf) 1172 { 1173 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1174 int ret = 0; 1175 1176 if (!pp_funcs->print_clock_levels) 1177 return 0; 1178 1179 mutex_lock(&adev->pm.mutex); 1180 ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle, 1181 type, 1182 buf); 1183 mutex_unlock(&adev->pm.mutex); 1184 1185 return ret; 1186 } 1187 1188 int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev, 1189 enum pp_clock_type type, 1190 char *buf, 1191 int *offset) 1192 { 1193 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1194 int ret = 0; 1195 1196 if (!pp_funcs->emit_clock_levels) 1197 return -ENOENT; 1198 1199 mutex_lock(&adev->pm.mutex); 1200 ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle, 1201 type, 1202 buf, 1203 offset); 1204 mutex_unlock(&adev->pm.mutex); 1205 1206 return ret; 1207 } 1208 1209 int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev, 1210 uint64_t ppfeature_masks) 1211 { 1212 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1213 int ret = 0; 1214 1215 if (!pp_funcs->set_ppfeature_status) 1216 return 0; 1217 1218 mutex_lock(&adev->pm.mutex); 1219 ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle, 1220 ppfeature_masks); 1221 mutex_unlock(&adev->pm.mutex); 1222 1223 return ret; 1224 } 1225 1226 int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf) 1227 { 1228 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1229 int ret = 0; 1230 1231 if (!pp_funcs->get_ppfeature_status) 1232 return 0; 1233 1234 mutex_lock(&adev->pm.mutex); 1235 ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle, 1236 buf); 1237 mutex_unlock(&adev->pm.mutex); 1238 1239 return ret; 1240 } 1241 1242 int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev, 1243 enum pp_clock_type type, 1244 uint32_t mask) 1245 { 1246 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1247 int ret = 0; 1248 1249 if (!pp_funcs->force_clock_level) 1250 return 0; 1251 1252 mutex_lock(&adev->pm.mutex); 1253 ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle, 1254 type, 1255 mask); 1256 mutex_unlock(&adev->pm.mutex); 1257 1258 return ret; 1259 } 1260 1261 int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev) 1262 { 1263 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1264 int ret = 0; 1265 1266 if (!pp_funcs->get_sclk_od) 1267 return -EOPNOTSUPP; 1268 1269 mutex_lock(&adev->pm.mutex); 1270 ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle); 1271 mutex_unlock(&adev->pm.mutex); 1272 1273 return ret; 1274 } 1275 1276 int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) 1277 { 1278 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1279 1280 if (is_support_sw_smu(adev)) 1281 return -EOPNOTSUPP; 1282 1283 mutex_lock(&adev->pm.mutex); 1284 if (pp_funcs->set_sclk_od) 1285 pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value); 1286 mutex_unlock(&adev->pm.mutex); 1287 1288 if (amdgpu_dpm_dispatch_task(adev, 1289 AMD_PP_TASK_READJUST_POWER_STATE, 1290 NULL) == -EOPNOTSUPP) { 1291 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1292 amdgpu_dpm_compute_clocks(adev); 1293 } 1294 1295 return 0; 1296 } 1297 1298 int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev) 1299 { 1300 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1301 int ret = 0; 1302 1303 if (!pp_funcs->get_mclk_od) 1304 return -EOPNOTSUPP; 1305 1306 mutex_lock(&adev->pm.mutex); 1307 ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle); 1308 mutex_unlock(&adev->pm.mutex); 1309 1310 return ret; 1311 } 1312 1313 int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) 1314 { 1315 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1316 1317 if (is_support_sw_smu(adev)) 1318 return -EOPNOTSUPP; 1319 1320 mutex_lock(&adev->pm.mutex); 1321 if (pp_funcs->set_mclk_od) 1322 pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value); 1323 mutex_unlock(&adev->pm.mutex); 1324 1325 if (amdgpu_dpm_dispatch_task(adev, 1326 AMD_PP_TASK_READJUST_POWER_STATE, 1327 NULL) == -EOPNOTSUPP) { 1328 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1329 amdgpu_dpm_compute_clocks(adev); 1330 } 1331 1332 return 0; 1333 } 1334 1335 int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev, 1336 char *buf) 1337 { 1338 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1339 int ret = 0; 1340 1341 if (!pp_funcs->get_power_profile_mode) 1342 return -EOPNOTSUPP; 1343 1344 mutex_lock(&adev->pm.mutex); 1345 ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle, 1346 buf); 1347 mutex_unlock(&adev->pm.mutex); 1348 1349 return ret; 1350 } 1351 1352 int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev, 1353 long *input, uint32_t size) 1354 { 1355 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1356 int ret = 0; 1357 1358 if (!pp_funcs->set_power_profile_mode) 1359 return 0; 1360 1361 mutex_lock(&adev->pm.mutex); 1362 ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle, 1363 input, 1364 size); 1365 mutex_unlock(&adev->pm.mutex); 1366 1367 return ret; 1368 } 1369 1370 int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table) 1371 { 1372 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1373 int ret = 0; 1374 1375 if (!pp_funcs->get_gpu_metrics) 1376 return 0; 1377 1378 mutex_lock(&adev->pm.mutex); 1379 ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle, 1380 table); 1381 mutex_unlock(&adev->pm.mutex); 1382 1383 return ret; 1384 } 1385 1386 ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics, 1387 size_t size) 1388 { 1389 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1390 int ret = 0; 1391 1392 if (!pp_funcs->get_pm_metrics) 1393 return -EOPNOTSUPP; 1394 1395 mutex_lock(&adev->pm.mutex); 1396 ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics, 1397 size); 1398 mutex_unlock(&adev->pm.mutex); 1399 1400 return ret; 1401 } 1402 1403 int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev, 1404 uint32_t *fan_mode) 1405 { 1406 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1407 int ret = 0; 1408 1409 if (!pp_funcs->get_fan_control_mode) 1410 return -EOPNOTSUPP; 1411 1412 mutex_lock(&adev->pm.mutex); 1413 ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle, 1414 fan_mode); 1415 mutex_unlock(&adev->pm.mutex); 1416 1417 return ret; 1418 } 1419 1420 int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev, 1421 uint32_t speed) 1422 { 1423 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1424 int ret = 0; 1425 1426 if (!pp_funcs->set_fan_speed_pwm) 1427 return -EOPNOTSUPP; 1428 1429 mutex_lock(&adev->pm.mutex); 1430 ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle, 1431 speed); 1432 mutex_unlock(&adev->pm.mutex); 1433 1434 return ret; 1435 } 1436 1437 int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev, 1438 uint32_t *speed) 1439 { 1440 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1441 int ret = 0; 1442 1443 if (!pp_funcs->get_fan_speed_pwm) 1444 return -EOPNOTSUPP; 1445 1446 mutex_lock(&adev->pm.mutex); 1447 ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle, 1448 speed); 1449 mutex_unlock(&adev->pm.mutex); 1450 1451 return ret; 1452 } 1453 1454 int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev, 1455 uint32_t *speed) 1456 { 1457 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1458 int ret = 0; 1459 1460 if (!pp_funcs->get_fan_speed_rpm) 1461 return -EOPNOTSUPP; 1462 1463 mutex_lock(&adev->pm.mutex); 1464 ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle, 1465 speed); 1466 mutex_unlock(&adev->pm.mutex); 1467 1468 return ret; 1469 } 1470 1471 int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev, 1472 uint32_t speed) 1473 { 1474 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1475 int ret = 0; 1476 1477 if (!pp_funcs->set_fan_speed_rpm) 1478 return -EOPNOTSUPP; 1479 1480 mutex_lock(&adev->pm.mutex); 1481 ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle, 1482 speed); 1483 mutex_unlock(&adev->pm.mutex); 1484 1485 return ret; 1486 } 1487 1488 int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev, 1489 uint32_t mode) 1490 { 1491 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1492 int ret = 0; 1493 1494 if (!pp_funcs->set_fan_control_mode) 1495 return -EOPNOTSUPP; 1496 1497 mutex_lock(&adev->pm.mutex); 1498 ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle, 1499 mode); 1500 mutex_unlock(&adev->pm.mutex); 1501 1502 return ret; 1503 } 1504 1505 int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, 1506 uint32_t *limit, 1507 enum pp_power_limit_level pp_limit_level, 1508 enum pp_power_type power_type) 1509 { 1510 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1511 int ret = 0; 1512 1513 if (!pp_funcs->get_power_limit) 1514 return -ENODATA; 1515 1516 mutex_lock(&adev->pm.mutex); 1517 ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle, 1518 limit, 1519 pp_limit_level, 1520 power_type); 1521 mutex_unlock(&adev->pm.mutex); 1522 1523 return ret; 1524 } 1525 1526 int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, 1527 uint32_t limit) 1528 { 1529 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1530 int ret = 0; 1531 1532 if (!pp_funcs->set_power_limit) 1533 return -EINVAL; 1534 1535 mutex_lock(&adev->pm.mutex); 1536 ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle, 1537 limit); 1538 mutex_unlock(&adev->pm.mutex); 1539 1540 return ret; 1541 } 1542 1543 int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev) 1544 { 1545 bool cclk_dpm_supported = false; 1546 1547 if (!is_support_sw_smu(adev)) 1548 return false; 1549 1550 mutex_lock(&adev->pm.mutex); 1551 cclk_dpm_supported = is_support_cclk_dpm(adev); 1552 mutex_unlock(&adev->pm.mutex); 1553 1554 return (int)cclk_dpm_supported; 1555 } 1556 1557 int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 1558 struct seq_file *m) 1559 { 1560 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1561 1562 if (!pp_funcs->debugfs_print_current_performance_level) 1563 return -EOPNOTSUPP; 1564 1565 mutex_lock(&adev->pm.mutex); 1566 pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle, 1567 m); 1568 mutex_unlock(&adev->pm.mutex); 1569 1570 return 0; 1571 } 1572 1573 int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, 1574 void **addr, 1575 size_t *size) 1576 { 1577 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1578 int ret = 0; 1579 1580 if (!pp_funcs->get_smu_prv_buf_details) 1581 return -ENOSYS; 1582 1583 mutex_lock(&adev->pm.mutex); 1584 ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle, 1585 addr, 1586 size); 1587 mutex_unlock(&adev->pm.mutex); 1588 1589 return ret; 1590 } 1591 1592 int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) 1593 { 1594 if (is_support_sw_smu(adev)) { 1595 struct smu_context *smu = adev->powerplay.pp_handle; 1596 1597 return (smu->od_enabled || smu->is_apu); 1598 } else { 1599 struct pp_hwmgr *hwmgr; 1600 1601 /* 1602 * dpm on some legacy asics don't carry od_enabled member 1603 * as its pp_handle is casted directly from adev. 1604 */ 1605 if (amdgpu_dpm_is_legacy_dpm(adev)) 1606 return false; 1607 1608 hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle; 1609 1610 return hwmgr->od_enabled; 1611 } 1612 } 1613 1614 int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, 1615 const char *buf, 1616 size_t size) 1617 { 1618 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1619 int ret = 0; 1620 1621 if (!pp_funcs->set_pp_table) 1622 return -EOPNOTSUPP; 1623 1624 mutex_lock(&adev->pm.mutex); 1625 ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle, 1626 buf, 1627 size); 1628 mutex_unlock(&adev->pm.mutex); 1629 1630 return ret; 1631 } 1632 1633 int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev) 1634 { 1635 struct smu_context *smu = adev->powerplay.pp_handle; 1636 1637 if (!is_support_sw_smu(adev)) 1638 return INT_MAX; 1639 1640 return smu->cpu_core_num; 1641 } 1642 1643 void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev) 1644 { 1645 if (!is_support_sw_smu(adev)) 1646 return; 1647 1648 amdgpu_smu_stb_debug_fs_init(adev); 1649 } 1650 1651 int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev, 1652 const struct amd_pp_display_configuration *input) 1653 { 1654 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1655 int ret = 0; 1656 1657 if (!pp_funcs->display_configuration_change) 1658 return 0; 1659 1660 mutex_lock(&adev->pm.mutex); 1661 ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle, 1662 input); 1663 mutex_unlock(&adev->pm.mutex); 1664 1665 return ret; 1666 } 1667 1668 int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev, 1669 enum amd_pp_clock_type type, 1670 struct amd_pp_clocks *clocks) 1671 { 1672 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1673 int ret = 0; 1674 1675 if (!pp_funcs->get_clock_by_type) 1676 return 0; 1677 1678 mutex_lock(&adev->pm.mutex); 1679 ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle, 1680 type, 1681 clocks); 1682 mutex_unlock(&adev->pm.mutex); 1683 1684 return ret; 1685 } 1686 1687 int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev, 1688 struct amd_pp_simple_clock_info *clocks) 1689 { 1690 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1691 int ret = 0; 1692 1693 if (!pp_funcs->get_display_mode_validation_clocks) 1694 return 0; 1695 1696 mutex_lock(&adev->pm.mutex); 1697 ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle, 1698 clocks); 1699 mutex_unlock(&adev->pm.mutex); 1700 1701 return ret; 1702 } 1703 1704 int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev, 1705 enum amd_pp_clock_type type, 1706 struct pp_clock_levels_with_latency *clocks) 1707 { 1708 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1709 int ret = 0; 1710 1711 if (!pp_funcs->get_clock_by_type_with_latency) 1712 return 0; 1713 1714 mutex_lock(&adev->pm.mutex); 1715 ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle, 1716 type, 1717 clocks); 1718 mutex_unlock(&adev->pm.mutex); 1719 1720 return ret; 1721 } 1722 1723 int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev, 1724 enum amd_pp_clock_type type, 1725 struct pp_clock_levels_with_voltage *clocks) 1726 { 1727 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1728 int ret = 0; 1729 1730 if (!pp_funcs->get_clock_by_type_with_voltage) 1731 return 0; 1732 1733 mutex_lock(&adev->pm.mutex); 1734 ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle, 1735 type, 1736 clocks); 1737 mutex_unlock(&adev->pm.mutex); 1738 1739 return ret; 1740 } 1741 1742 int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev, 1743 void *clock_ranges) 1744 { 1745 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1746 int ret = 0; 1747 1748 if (!pp_funcs->set_watermarks_for_clocks_ranges) 1749 return -EOPNOTSUPP; 1750 1751 mutex_lock(&adev->pm.mutex); 1752 ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle, 1753 clock_ranges); 1754 mutex_unlock(&adev->pm.mutex); 1755 1756 return ret; 1757 } 1758 1759 int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev, 1760 struct pp_display_clock_request *clock) 1761 { 1762 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1763 int ret = 0; 1764 1765 if (!pp_funcs->display_clock_voltage_request) 1766 return -EOPNOTSUPP; 1767 1768 mutex_lock(&adev->pm.mutex); 1769 ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle, 1770 clock); 1771 mutex_unlock(&adev->pm.mutex); 1772 1773 return ret; 1774 } 1775 1776 int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev, 1777 struct amd_pp_clock_info *clocks) 1778 { 1779 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1780 int ret = 0; 1781 1782 if (!pp_funcs->get_current_clocks) 1783 return -EOPNOTSUPP; 1784 1785 mutex_lock(&adev->pm.mutex); 1786 ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle, 1787 clocks); 1788 mutex_unlock(&adev->pm.mutex); 1789 1790 return ret; 1791 } 1792 1793 void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev) 1794 { 1795 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1796 1797 if (!pp_funcs->notify_smu_enable_pwe) 1798 return; 1799 1800 mutex_lock(&adev->pm.mutex); 1801 pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle); 1802 mutex_unlock(&adev->pm.mutex); 1803 } 1804 1805 int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev, 1806 uint32_t count) 1807 { 1808 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1809 int ret = 0; 1810 1811 if (!pp_funcs->set_active_display_count) 1812 return -EOPNOTSUPP; 1813 1814 mutex_lock(&adev->pm.mutex); 1815 ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle, 1816 count); 1817 mutex_unlock(&adev->pm.mutex); 1818 1819 return ret; 1820 } 1821 1822 int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev, 1823 uint32_t clock) 1824 { 1825 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1826 int ret = 0; 1827 1828 if (!pp_funcs->set_min_deep_sleep_dcefclk) 1829 return -EOPNOTSUPP; 1830 1831 mutex_lock(&adev->pm.mutex); 1832 ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle, 1833 clock); 1834 mutex_unlock(&adev->pm.mutex); 1835 1836 return ret; 1837 } 1838 1839 void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev, 1840 uint32_t clock) 1841 { 1842 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1843 1844 if (!pp_funcs->set_hard_min_dcefclk_by_freq) 1845 return; 1846 1847 mutex_lock(&adev->pm.mutex); 1848 pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle, 1849 clock); 1850 mutex_unlock(&adev->pm.mutex); 1851 } 1852 1853 void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev, 1854 uint32_t clock) 1855 { 1856 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1857 1858 if (!pp_funcs->set_hard_min_fclk_by_freq) 1859 return; 1860 1861 mutex_lock(&adev->pm.mutex); 1862 pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle, 1863 clock); 1864 mutex_unlock(&adev->pm.mutex); 1865 } 1866 1867 int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev, 1868 bool disable_memory_clock_switch) 1869 { 1870 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1871 int ret = 0; 1872 1873 if (!pp_funcs->display_disable_memory_clock_switch) 1874 return 0; 1875 1876 mutex_lock(&adev->pm.mutex); 1877 ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle, 1878 disable_memory_clock_switch); 1879 mutex_unlock(&adev->pm.mutex); 1880 1881 return ret; 1882 } 1883 1884 int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev, 1885 struct pp_smu_nv_clock_table *max_clocks) 1886 { 1887 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1888 int ret = 0; 1889 1890 if (!pp_funcs->get_max_sustainable_clocks_by_dc) 1891 return -EOPNOTSUPP; 1892 1893 mutex_lock(&adev->pm.mutex); 1894 ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle, 1895 max_clocks); 1896 mutex_unlock(&adev->pm.mutex); 1897 1898 return ret; 1899 } 1900 1901 enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev, 1902 unsigned int *clock_values_in_khz, 1903 unsigned int *num_states) 1904 { 1905 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1906 int ret = 0; 1907 1908 if (!pp_funcs->get_uclk_dpm_states) 1909 return -EOPNOTSUPP; 1910 1911 mutex_lock(&adev->pm.mutex); 1912 ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle, 1913 clock_values_in_khz, 1914 num_states); 1915 mutex_unlock(&adev->pm.mutex); 1916 1917 return ret; 1918 } 1919 1920 int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev, 1921 struct dpm_clocks *clock_table) 1922 { 1923 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1924 int ret = 0; 1925 1926 if (!pp_funcs->get_dpm_clock_table) 1927 return -EOPNOTSUPP; 1928 1929 mutex_lock(&adev->pm.mutex); 1930 ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle, 1931 clock_table); 1932 mutex_unlock(&adev->pm.mutex); 1933 1934 return ret; 1935 } 1936