/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))

int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
				      uint32_t block_type,
				      bool gate,
				      int inst)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
	bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN;

	mutex_lock(&adev->pm.mutex);

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
	    (!is_vcn || adev->vcn.num_vcn_inst == 1)) {
		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
			block_type, gate ? "gate" : "ungate");
"gate" : "ungate"); 89 goto out_unlock; 90 } 91 92 switch (block_type) { 93 case AMD_IP_BLOCK_TYPE_UVD: 94 case AMD_IP_BLOCK_TYPE_VCE: 95 case AMD_IP_BLOCK_TYPE_GFX: 96 case AMD_IP_BLOCK_TYPE_SDMA: 97 case AMD_IP_BLOCK_TYPE_JPEG: 98 case AMD_IP_BLOCK_TYPE_GMC: 99 case AMD_IP_BLOCK_TYPE_ACP: 100 case AMD_IP_BLOCK_TYPE_VPE: 101 case AMD_IP_BLOCK_TYPE_ISP: 102 if (pp_funcs && pp_funcs->set_powergating_by_smu) 103 ret = (pp_funcs->set_powergating_by_smu( 104 (adev)->powerplay.pp_handle, block_type, gate, 0)); 105 break; 106 case AMD_IP_BLOCK_TYPE_VCN: 107 if (pp_funcs && pp_funcs->set_powergating_by_smu) 108 ret = (pp_funcs->set_powergating_by_smu( 109 (adev)->powerplay.pp_handle, block_type, gate, inst)); 110 break; 111 default: 112 break; 113 } 114 115 if (!ret) 116 atomic_set(&adev->pm.pwr_state[block_type], pwr_state); 117 118 out_unlock: 119 mutex_unlock(&adev->pm.mutex); 120 121 return ret; 122 } 123 124 int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev) 125 { 126 struct smu_context *smu = adev->powerplay.pp_handle; 127 int ret = -EOPNOTSUPP; 128 129 mutex_lock(&adev->pm.mutex); 130 ret = smu_set_gfx_power_up_by_imu(smu); 131 mutex_unlock(&adev->pm.mutex); 132 133 msleep(10); 134 135 return ret; 136 } 137 138 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev) 139 { 140 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 141 void *pp_handle = adev->powerplay.pp_handle; 142 int ret = 0; 143 144 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 145 return -ENOENT; 146 147 mutex_lock(&adev->pm.mutex); 148 149 /* enter BACO state */ 150 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 151 152 mutex_unlock(&adev->pm.mutex); 153 154 return ret; 155 } 156 157 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev) 158 { 159 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 160 void *pp_handle = adev->powerplay.pp_handle; 161 int ret = 0; 162 163 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 164 return -ENOENT; 165 166 mutex_lock(&adev->pm.mutex); 167 168 /* exit BACO state */ 169 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 170 171 mutex_unlock(&adev->pm.mutex); 172 173 return ret; 174 } 175 176 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, 177 enum pp_mp1_state mp1_state) 178 { 179 int ret = 0; 180 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 181 182 if (mp1_state == PP_MP1_STATE_FLR) { 183 /* VF lost access to SMU */ 184 if (amdgpu_sriov_vf(adev)) 185 adev->pm.dpm_enabled = false; 186 } else if (pp_funcs && pp_funcs->set_mp1_state) { 187 mutex_lock(&adev->pm.mutex); 188 189 ret = pp_funcs->set_mp1_state( 190 adev->powerplay.pp_handle, 191 mp1_state); 192 193 mutex_unlock(&adev->pm.mutex); 194 } 195 196 return ret; 197 } 198 199 int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) 200 { 201 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 202 void *pp_handle = adev->powerplay.pp_handle; 203 int ret; 204 205 if (!pp_funcs || !pp_funcs->get_asic_baco_capability) 206 return 0; 207 /* Don't use baco for reset in S3. 208 * This is a workaround for some platforms 209 * where entering BACO during suspend 210 * seems to cause reboots or hangs. 211 * This might be related to the fact that BACO controls 212 * power to the whole GPU including devices like audio and USB. 213 * Powering down/up everything may adversely affect these other 214 * devices. Needs more investigation. 
	 */
	if (adev->in_s3)
		return 0;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_link_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_link_reset = smu_link_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_link_reset;
}

int amdgpu_dpm_link_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_link_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
				   bool pause)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->pause_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->pause_power_profile(
			adev->powerplay.pp_handle, pause);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
				      enum pp_pm_policy p_type, char *buf)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_get_pm_policy_info(smu, p_type, buf);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
			     int policy_level)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_set_pm_policy(smu, policy_type, policy_level);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable, 0);
	if (ret)
		drm_err(adev_to_drm(adev), "DPM %s uvd failed, ret = %d.\n",
			enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, !enable, inst);
	if (ret)
		drm_err(adev_to_drm(adev), "DPM %s vcn failed, ret = %d.\n",
			enable ? "enable" : "disable", ret);
"enable" : "disable", ret); 623 } 624 625 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 626 { 627 int ret = 0; 628 629 if (adev->family == AMDGPU_FAMILY_SI) { 630 mutex_lock(&adev->pm.mutex); 631 if (enable) { 632 adev->pm.dpm.vce_active = true; 633 /* XXX select vce level based on ring/task */ 634 adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; 635 } else { 636 adev->pm.dpm.vce_active = false; 637 } 638 mutex_unlock(&adev->pm.mutex); 639 640 amdgpu_dpm_compute_clocks(adev); 641 return; 642 } 643 644 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable, 0); 645 if (ret) 646 drm_err(adev_to_drm(adev), "DPM %s vce failed, ret = %d.\n", 647 enable ? "enable" : "disable", ret); 648 } 649 650 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) 651 { 652 int ret = 0; 653 654 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable, 0); 655 if (ret) 656 drm_err(adev_to_drm(adev), "Dpm %s jpeg failed, ret = %d.\n", 657 enable ? "enable" : "disable", ret); 658 } 659 660 void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable) 661 { 662 int ret = 0; 663 664 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable, 0); 665 if (ret) 666 drm_err(adev_to_drm(adev), "DPM %s vpe failed, ret = %d.\n", 667 enable ? "enable" : "disable", ret); 668 } 669 670 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) 671 { 672 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 673 int r = 0; 674 675 if (!pp_funcs || !pp_funcs->load_firmware || 676 (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU))) 677 return 0; 678 679 mutex_lock(&adev->pm.mutex); 680 r = pp_funcs->load_firmware(adev->powerplay.pp_handle); 681 if (r) { 682 pr_err("smu firmware loading failed\n"); 683 goto out; 684 } 685 686 if (smu_version) 687 *smu_version = adev->pm.fw_version; 688 689 out: 690 mutex_unlock(&adev->pm.mutex); 691 return r; 692 } 693 694 int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable) 695 { 696 int ret = 0; 697 698 if (is_support_sw_smu(adev)) { 699 mutex_lock(&adev->pm.mutex); 700 ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle, 701 enable); 702 mutex_unlock(&adev->pm.mutex); 703 } 704 705 return ret; 706 } 707 708 int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size) 709 { 710 struct smu_context *smu = adev->powerplay.pp_handle; 711 int ret = 0; 712 713 if (!is_support_sw_smu(adev)) 714 return -EOPNOTSUPP; 715 716 mutex_lock(&adev->pm.mutex); 717 ret = smu_send_hbm_bad_pages_num(smu, size); 718 mutex_unlock(&adev->pm.mutex); 719 720 return ret; 721 } 722 723 int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size) 724 { 725 struct smu_context *smu = adev->powerplay.pp_handle; 726 int ret = 0; 727 728 if (!is_support_sw_smu(adev)) 729 return -EOPNOTSUPP; 730 731 mutex_lock(&adev->pm.mutex); 732 ret = smu_send_hbm_bad_channel_flag(smu, size); 733 mutex_unlock(&adev->pm.mutex); 734 735 return ret; 736 } 737 738 int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev) 739 { 740 struct smu_context *smu = adev->powerplay.pp_handle; 741 int ret; 742 743 if (!is_support_sw_smu(adev)) 744 return -EOPNOTSUPP; 745 746 mutex_lock(&adev->pm.mutex); 747 ret = smu_send_rma_reason(smu); 748 mutex_unlock(&adev->pm.mutex); 749 750 return ret; 751 } 752 753 /** 754 * amdgpu_dpm_reset_sdma_is_supported - Check if SDMA reset is supported 755 * @adev: amdgpu_device pointer 756 * 757 * This 
 * It returns false if the hardware does not support software SMU or
 * if the feature is not supported.
 */
bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool ret;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	ret = smu_reset_sdma_is_supported(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_reset_sdma(smu, inst_mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_reset_vcn(smu, inst_mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

bool amdgpu_dpm_reset_vcn_is_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool ret;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	ret = smu_reset_vcn_is_supported(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	guard(mutex)(&adev->pm.mutex);

	return smu_set_soft_freq_range(smu,
				       type,
				       min,
				       max);
}

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_entrycount_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/* amdgpu_dpm_gfx_state_change - Handle gfx power state change set
 * @adev: amdgpu_device pointer
 * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry)
 *
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (!pp_funcs)
		return AMD_DPM_FORCED_LEVEL_AUTO;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

static void amdgpu_dpm_enter_umd_state(struct amdgpu_device *adev)
{
	/* enter UMD Pstate */
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_PG_STATE_UNGATE);
	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_CG_STATE_UNGATE);
}

static void amdgpu_dpm_exit_umd_state(struct amdgpu_device *adev)
{
	/* exit UMD Pstate */
	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_CG_STATE_GATE);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_PG_STATE_GATE);
}

int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) && (level & profile_mode_mask))
		amdgpu_dpm_enter_umd_state(adev);
	else if ((current_level & profile_mode_mask) &&
		 !(level & profile_mode_mask))
		amdgpu_dpm_exit_umd_state(adev);

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		/* If new level failed, retain the umd state as before */
		if (!(current_level & profile_mode_mask) &&
		    (level & profile_mode_mask))
			amdgpu_dpm_exit_umd_state(adev);
		else if ((current_level & profile_mode_mask) &&
			 !(level & profile_mode_mask))
			amdgpu_dpm_enter_umd_state(adev);

		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!table)
		return -EINVAL;

	if (amdgpu_sriov_vf(adev) || !pp_funcs->get_pp_table || adev->scpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 char *buf,
				 int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
				  size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pm_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
				       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit_type,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit_type, limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = adev->powerplay.pp_handle;

		return (smu->od_enabled || smu->is_apu);
	} else {
		struct pp_hwmgr *hwmgr;

		/*
		 * dpm on some legacy asics don't carry od_enabled member
		 * as its pp_handle is casted directly from adev.
		 */
		if (amdgpu_dpm_is_legacy_dpm(adev))
			return false;

		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

		return hwmgr->od_enabled;
	}
}

int amdgpu_dpm_is_overdrive_enabled(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = adev->powerplay.pp_handle;

		return smu->od_enabled;
	} else {
		struct pp_hwmgr *hwmgr;

		/*
		 * dpm on some legacy asics don't carry od_enabled member
		 * as its pp_handle is casted directly from adev.
		 */
		if (amdgpu_dpm_is_legacy_dpm(adev))
			return false;

		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

		return hwmgr->od_enabled;
	}
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!buf || !size)
		return -EINVAL;

	if (amdgpu_sriov_vf(adev) || !pp_funcs->set_pp_table || adev->scpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

/**
 * amdgpu_dpm_get_temp_metrics - Retrieve temperature metrics for a specific type
 * @adev: Pointer to the device.
 * @type: Identifier for the temperature type metrics to be fetched.
 * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
 * function returns the size of the metrics structure.
 *
 * This function retrieves metrics for a specific temperature type. If the
 * table parameter is NULL, the function returns the size of the metrics
 * structure without populating it.
 *
 * Return: Size of the metrics structure on success, or a negative error code on failure.
 */
ssize_t amdgpu_dpm_get_temp_metrics(struct amdgpu_device *adev,
				    enum smu_temp_metric_type type, void *table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret;

	if (!pp_funcs->get_temp_metrics ||
	    !amdgpu_dpm_is_temp_metrics_supported(adev, type))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_temp_metrics(adev->powerplay.pp_handle, type, table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

/**
 * amdgpu_dpm_is_temp_metrics_supported - Check if a specific temperature
 * metrics type is supported
 * @adev: Pointer to the device.
 * @type: Identifier for the temperature type metrics to be fetched.
 *
 * This function reports whether the given temperature metrics type is supported.
 *
 * Return: True if the metrics type is supported, false otherwise.
 */
bool amdgpu_dpm_is_temp_metrics_supported(struct amdgpu_device *adev,
					  enum smu_temp_metric_type type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	bool support_temp_metrics = false;

	if (!pp_funcs->temp_metrics_is_supported)
		return support_temp_metrics;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_temp_metrics =
			pp_funcs->temp_metrics_is_supported(adev->powerplay.pp_handle, type);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_temp_metrics;
}

/**
 * amdgpu_dpm_get_xcp_metrics - Retrieve metrics for a specific compute
 * partition
 * @adev: Pointer to the device.
 * @xcp_id: Identifier of the XCP for which metrics are to be retrieved.
 * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
 * function returns the size of the metrics structure.
 *
 * This function retrieves metrics for a specific XCP, including details such as
 * VCN/JPEG activity, clock frequencies, and other performance metrics. If the
 * table parameter is NULL, the function returns the size of the metrics
 * structure without populating it.
 *
 * Return: Size of the metrics structure on success, or a negative error code on failure.
 */
ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
				   void *table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_xcp_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_xcp_metrics(adev->powerplay.pp_handle, xcp_id,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

const struct ras_smu_drv *amdgpu_dpm_get_ras_smu_driver(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;

	return smu_get_ras_smu_driver(pp_handle);
}