/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))

int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
				      uint32_t block_type,
				      bool gate,
				      int inst)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
	bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
	    (!is_vcn || adev->vcn.num_vcn_inst == 1)) {
		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
			block_type, gate ?
"gate" : "ungate"); 87 return 0; 88 } 89 90 mutex_lock(&adev->pm.mutex); 91 92 switch (block_type) { 93 case AMD_IP_BLOCK_TYPE_UVD: 94 case AMD_IP_BLOCK_TYPE_VCE: 95 case AMD_IP_BLOCK_TYPE_GFX: 96 case AMD_IP_BLOCK_TYPE_SDMA: 97 case AMD_IP_BLOCK_TYPE_JPEG: 98 case AMD_IP_BLOCK_TYPE_GMC: 99 case AMD_IP_BLOCK_TYPE_ACP: 100 case AMD_IP_BLOCK_TYPE_VPE: 101 case AMD_IP_BLOCK_TYPE_ISP: 102 if (pp_funcs && pp_funcs->set_powergating_by_smu) 103 ret = (pp_funcs->set_powergating_by_smu( 104 (adev)->powerplay.pp_handle, block_type, gate, 0)); 105 break; 106 case AMD_IP_BLOCK_TYPE_VCN: 107 if (pp_funcs && pp_funcs->set_powergating_by_smu) 108 ret = (pp_funcs->set_powergating_by_smu( 109 (adev)->powerplay.pp_handle, block_type, gate, inst)); 110 break; 111 default: 112 break; 113 } 114 115 if (!ret) 116 atomic_set(&adev->pm.pwr_state[block_type], pwr_state); 117 118 mutex_unlock(&adev->pm.mutex); 119 120 return ret; 121 } 122 123 int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev) 124 { 125 struct smu_context *smu = adev->powerplay.pp_handle; 126 int ret = -EOPNOTSUPP; 127 128 mutex_lock(&adev->pm.mutex); 129 ret = smu_set_gfx_power_up_by_imu(smu); 130 mutex_unlock(&adev->pm.mutex); 131 132 msleep(10); 133 134 return ret; 135 } 136 137 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev) 138 { 139 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 140 void *pp_handle = adev->powerplay.pp_handle; 141 int ret = 0; 142 143 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 144 return -ENOENT; 145 146 mutex_lock(&adev->pm.mutex); 147 148 /* enter BACO state */ 149 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 150 151 mutex_unlock(&adev->pm.mutex); 152 153 return ret; 154 } 155 156 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev) 157 { 158 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 159 void *pp_handle = adev->powerplay.pp_handle; 160 int ret = 0; 161 162 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 163 return -ENOENT; 164 165 mutex_lock(&adev->pm.mutex); 166 167 /* exit BACO state */ 168 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 169 170 mutex_unlock(&adev->pm.mutex); 171 172 return ret; 173 } 174 175 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, 176 enum pp_mp1_state mp1_state) 177 { 178 int ret = 0; 179 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 180 181 if (mp1_state == PP_MP1_STATE_FLR) { 182 /* VF lost access to SMU */ 183 if (amdgpu_sriov_vf(adev)) 184 adev->pm.dpm_enabled = false; 185 } else if (pp_funcs && pp_funcs->set_mp1_state) { 186 mutex_lock(&adev->pm.mutex); 187 188 ret = pp_funcs->set_mp1_state( 189 adev->powerplay.pp_handle, 190 mp1_state); 191 192 mutex_unlock(&adev->pm.mutex); 193 } 194 195 return ret; 196 } 197 198 int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) 199 { 200 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 201 void *pp_handle = adev->powerplay.pp_handle; 202 int ret; 203 204 if (!pp_funcs || !pp_funcs->get_asic_baco_capability) 205 return 0; 206 /* Don't use baco for reset in S3. 207 * This is a workaround for some platforms 208 * where entering BACO during suspend 209 * seems to cause reboots or hangs. 210 * This might be related to the fact that BACO controls 211 * power to the whole GPU including devices like audio and USB. 212 * Powering down/up everything may adversely affect these other 213 * devices. Needs more investigation. 
	 */
	if (adev->in_s3)
		return 0;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_link_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_link_reset = smu_link_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_link_reset;
}

int amdgpu_dpm_link_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_link_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
				   bool pause)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->pause_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->pause_power_profile(
			adev->powerplay.pp_handle, pause);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
				      enum pp_pm_policy p_type, char *buf)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_get_pm_policy_info(smu, p_type, buf);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
			     int policy_level)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_set_pm_policy(smu, policy_type, policy_level);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
		adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
		adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
		adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
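/*
 * Illustrative sketch (not part of the original file): a typical caller
 * pattern for amdgpu_dpm_switch_power_profile() is to request a profile
 * hint around a burst of work and drop it again afterwards.  The helper
 * below is hypothetical, and run_compute_burst() stands in for whatever
 * work the caller actually submits.  Compiled out; for documentation only.
 */
#if 0
static int amdgpu_dpm_compute_profile_example(struct amdgpu_device *adev)
{
	int ret;

	/* ask the SMU to bias clocks for compute work */
	ret = amdgpu_dpm_switch_power_profile(adev,
					      PP_SMC_POWER_PROFILE_COMPUTE,
					      true);
	if (ret)
		return ret;

	run_compute_burst(adev);

	/* drop the hint so the default profile selection is restored */
	return amdgpu_dpm_switch_power_profile(adev,
					       PP_SMC_POWER_PROFILE_COMPUTE,
					       false);
}
#endif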
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, !enable, inst);
	if (ret)
		DRM_ERROR("Dpm %s vcn failed, ret = %d.\n",
			  enable ?
"enable" : "disable", ret); 622 } 623 624 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 625 { 626 int ret = 0; 627 628 if (adev->family == AMDGPU_FAMILY_SI) { 629 mutex_lock(&adev->pm.mutex); 630 if (enable) { 631 adev->pm.dpm.vce_active = true; 632 /* XXX select vce level based on ring/task */ 633 adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; 634 } else { 635 adev->pm.dpm.vce_active = false; 636 } 637 mutex_unlock(&adev->pm.mutex); 638 639 amdgpu_dpm_compute_clocks(adev); 640 return; 641 } 642 643 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable, 0); 644 if (ret) 645 DRM_ERROR("Dpm %s vce failed, ret = %d. \n", 646 enable ? "enable" : "disable", ret); 647 } 648 649 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) 650 { 651 int ret = 0; 652 653 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable, 0); 654 if (ret) 655 DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", 656 enable ? "enable" : "disable", ret); 657 } 658 659 void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable) 660 { 661 int ret = 0; 662 663 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable, 0); 664 if (ret) 665 DRM_ERROR("Dpm %s vpe failed, ret = %d.\n", 666 enable ? "enable" : "disable", ret); 667 } 668 669 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) 670 { 671 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 672 int r = 0; 673 674 if (!pp_funcs || !pp_funcs->load_firmware || 675 (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU))) 676 return 0; 677 678 mutex_lock(&adev->pm.mutex); 679 r = pp_funcs->load_firmware(adev->powerplay.pp_handle); 680 if (r) { 681 pr_err("smu firmware loading failed\n"); 682 goto out; 683 } 684 685 if (smu_version) 686 *smu_version = adev->pm.fw_version; 687 688 out: 689 mutex_unlock(&adev->pm.mutex); 690 return r; 691 } 692 693 int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable) 694 { 695 int ret = 0; 696 697 if (is_support_sw_smu(adev)) { 698 mutex_lock(&adev->pm.mutex); 699 ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle, 700 enable); 701 mutex_unlock(&adev->pm.mutex); 702 } 703 704 return ret; 705 } 706 707 int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size) 708 { 709 struct smu_context *smu = adev->powerplay.pp_handle; 710 int ret = 0; 711 712 if (!is_support_sw_smu(adev)) 713 return -EOPNOTSUPP; 714 715 mutex_lock(&adev->pm.mutex); 716 ret = smu_send_hbm_bad_pages_num(smu, size); 717 mutex_unlock(&adev->pm.mutex); 718 719 return ret; 720 } 721 722 int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size) 723 { 724 struct smu_context *smu = adev->powerplay.pp_handle; 725 int ret = 0; 726 727 if (!is_support_sw_smu(adev)) 728 return -EOPNOTSUPP; 729 730 mutex_lock(&adev->pm.mutex); 731 ret = smu_send_hbm_bad_channel_flag(smu, size); 732 mutex_unlock(&adev->pm.mutex); 733 734 return ret; 735 } 736 737 int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev) 738 { 739 struct smu_context *smu = adev->powerplay.pp_handle; 740 int ret; 741 742 if (!is_support_sw_smu(adev)) 743 return -EOPNOTSUPP; 744 745 mutex_lock(&adev->pm.mutex); 746 ret = smu_send_rma_reason(smu); 747 mutex_unlock(&adev->pm.mutex); 748 749 return ret; 750 } 751 752 /** 753 * amdgpu_dpm_reset_sdma_is_supported - Check if SDMA reset is supported 754 * @adev: amdgpu_device pointer 755 * 756 * This function checks if the SMU supports resetting the SDMA 
 * engine. It returns false if the hardware does not support software SMU or
 * if the feature is not supported.
 */
bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool ret;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	ret = smu_reset_sdma_is_supported(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_reset_sdma(smu, inst_mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_reset_vcn(smu, inst_mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

bool amdgpu_dpm_reset_vcn_is_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool ret;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	ret = smu_reset_vcn_is_supported(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	guard(mutex)(&adev->pm.mutex);

	return smu_set_soft_freq_range(smu,
				       type,
				       min,
				       max);
}

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
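/*
 * Illustrative sketch (not part of the original file): the reset helpers
 * above take a bitmask of engine instances, so a caller that wants to reset
 * a single SDMA instance would typically gate the call on
 * amdgpu_dpm_reset_sdma_is_supported() and pass BIT(inst).  The helper name
 * is hypothetical.  Compiled out; for documentation only.
 */
#if 0
static int amdgpu_dpm_reset_one_sdma_example(struct amdgpu_device *adev,
					     uint32_t inst)
{
	if (!amdgpu_dpm_reset_sdma_is_supported(adev))
		return -EOPNOTSUPP;

	return amdgpu_dpm_reset_sdma(adev, BIT(inst));
}
#endif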
int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_entrycount_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/* amdgpu_dpm_gfx_state_change - Handle gfx power state change set
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 - sGpuChangeState_D0Entry and 2 - sGpuChangeState_D3Entry)
 *
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (!pp_funcs)
		return AMD_DPM_FORCED_LEVEL_AUTO;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

static void amdgpu_dpm_enter_umd_state(struct amdgpu_device *adev)
{
	/* enter UMD Pstate */
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_PG_STATE_UNGATE);
	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_CG_STATE_UNGATE);
}

static void amdgpu_dpm_exit_umd_state(struct amdgpu_device *adev)
{
	/* exit UMD Pstate */
	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_CG_STATE_GATE);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_PG_STATE_GATE);
}

int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) && (level & profile_mode_mask))
		amdgpu_dpm_enter_umd_state(adev);
	else if ((current_level & profile_mode_mask) &&
		 !(level & profile_mode_mask))
		amdgpu_dpm_exit_umd_state(adev);

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		/* If new level failed, retain the umd state as before */
		if (!(current_level & profile_mode_mask) &&
		    (level & profile_mode_mask))
			amdgpu_dpm_exit_umd_state(adev);
		else if ((current_level & profile_mode_mask) &&
			 !(level & profile_mode_mask))
			amdgpu_dpm_enter_umd_state(adev);

		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}
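/*
 * Illustrative sketch (not part of the original file): pinning clocks to a
 * profiling pstate, e.g. from a debug or benchmark path, and handing clock
 * selection back to automatic mode afterwards.  The helper name is
 * hypothetical.  Compiled out; for documentation only.
 */
#if 0
static int amdgpu_dpm_profile_peak_example(struct amdgpu_device *adev)
{
	int ret;

	/* force the peak profiling level */
	ret = amdgpu_dpm_force_performance_level(adev,
						 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK);
	if (ret)
		return ret;

	/* ... run the workload being profiled ... */

	/* restore automatic performance level selection */
	return amdgpu_dpm_force_performance_level(adev,
						  AMD_DPM_FORCED_LEVEL_AUTO);
}
#endif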
int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 char *buf,
				 int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
				  size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pm_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
				       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
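/*
 * Illustrative sketch (not part of the original file): the usual hwmon-style
 * sequence is to switch the fan to manual control before programming a PWM
 * duty cycle, then hand control back to the automatic controller when done.
 * AMD_FAN_CTRL_MANUAL/AMD_FAN_CTRL_AUTO come from the pp interface headers;
 * the helper name is hypothetical.  Compiled out; for documentation only.
 */
#if 0
static int amdgpu_dpm_fan_pwm_example(struct amdgpu_device *adev, uint32_t pwm)
{
	int ret;

	ret = amdgpu_dpm_set_fan_control_mode(adev, AMD_FAN_CTRL_MANUAL);
	if (ret)
		return ret;

	ret = amdgpu_dpm_set_fan_speed_pwm(adev, pwm);
	if (ret)
		return ret;

	return amdgpu_dpm_set_fan_control_mode(adev, AMD_FAN_CTRL_AUTO);
}
#endif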
int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
							struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							   m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = adev->powerplay.pp_handle;

		return (smu->od_enabled || smu->is_apu);
	} else {
		struct pp_hwmgr *hwmgr;

		/*
		 * dpm on some legacy asics doesn't carry the od_enabled member
		 * since its pp_handle is cast directly from adev.
		 */
		if (amdgpu_dpm_is_legacy_dpm(adev))
			return false;

		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

		return hwmgr->od_enabled;
	}
}

int amdgpu_dpm_is_overdrive_enabled(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = adev->powerplay.pp_handle;

		return smu->od_enabled;
	} else {
		struct pp_hwmgr *hwmgr;

		/*
		 * dpm on some legacy asics doesn't carry the od_enabled member
		 * since its pp_handle is cast directly from adev.
		 */
		if (amdgpu_dpm_is_legacy_dpm(adev))
			return false;

		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

		return hwmgr->od_enabled;
	}
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

/**
 * amdgpu_dpm_get_temp_metrics - Retrieve temperature metrics of a specific
 * type
 * @adev: Pointer to the device.
 * @type: Identifier for the temperature type metrics to be fetched.
 * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
 * function returns the size of the metrics structure.
 *
 * This function retrieves metrics for a specific temperature type. If the
 * table parameter is NULL, the function returns the size of the metrics
 * structure without populating it.
 *
 * Return: Size of the metrics structure on success, or a negative error code on failure.
 */
ssize_t amdgpu_dpm_get_temp_metrics(struct amdgpu_device *adev,
				    enum smu_temp_metric_type type, void *table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret;

	if (!pp_funcs->get_temp_metrics ||
	    !amdgpu_dpm_is_temp_metrics_supported(adev, type))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_temp_metrics(adev->powerplay.pp_handle, type, table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

/**
 * amdgpu_dpm_is_temp_metrics_supported - Check whether a specific temperature
 * metrics type is supported
 * @adev: Pointer to the device.
 * @type: Identifier for the temperature type metrics to be fetched.
 *
 * This function reports whether the given temperature metrics type is
 * supported.
 *
 * Return: True if the metrics type is supported, false otherwise.
 */
bool amdgpu_dpm_is_temp_metrics_supported(struct amdgpu_device *adev,
					  enum smu_temp_metric_type type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	bool support_temp_metrics = false;

	if (!pp_funcs->temp_metrics_is_supported)
		return support_temp_metrics;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_temp_metrics =
			pp_funcs->temp_metrics_is_supported(adev->powerplay.pp_handle, type);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_temp_metrics;
}

/**
 * amdgpu_dpm_get_xcp_metrics - Retrieve metrics for a specific compute
 * partition
 * @adev: Pointer to the device.
 * @xcp_id: Identifier of the XCP for which metrics are to be retrieved.
 * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
 * function returns the size of the metrics structure.
 *
 * This function retrieves metrics for a specific XCP, including details such as
 * VCN/JPEG activity, clock frequencies, and other performance metrics. If the
 * table parameter is NULL, the function returns the size of the metrics
 * structure without populating it.
 *
 * Return: Size of the metrics structure on success, or a negative error code on failure.
 */
ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
				   void *table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_xcp_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_xcp_metrics(adev->powerplay.pp_handle, xcp_id,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
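/*
 * Illustrative sketch (not part of the original file): per the kernel-doc
 * above, callers typically use amdgpu_dpm_get_xcp_metrics() in two steps,
 * first with a NULL table to learn the required buffer size and then again
 * with an allocated buffer.  The helper name is hypothetical.  Compiled out;
 * for documentation only.
 */
#if 0
static void *amdgpu_dpm_read_xcp_metrics_example(struct amdgpu_device *adev,
						 int xcp_id)
{
	ssize_t size;
	void *table;

	/* first call: query the size of the metrics structure */
	size = amdgpu_dpm_get_xcp_metrics(adev, xcp_id, NULL);
	if (size <= 0)
		return NULL;

	table = kzalloc(size, GFP_KERNEL);
	if (!table)
		return NULL;

	/* second call: populate the caller-provided buffer */
	if (amdgpu_dpm_get_xcp_metrics(adev, xcp_id, table) <= 0) {
		kfree(table);
		return NULL;
	}

	/* caller owns @table and must kfree() it */
	return table;
}
#endif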