1 /* 2 * Copyright 2011 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Alex Deucher 23 */ 24 25 #include "amdgpu.h" 26 #include "amdgpu_atombios.h" 27 #include "amdgpu_i2c.h" 28 #include "amdgpu_dpm.h" 29 #include "atom.h" 30 #include "amd_pcie.h" 31 #include "amdgpu_display.h" 32 #include "hwmgr.h" 33 #include <linux/power_supply.h> 34 #include "amdgpu_smu.h" 35 36 #define amdgpu_dpm_enable_bapm(adev, e) \ 37 ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) 38 39 #define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev)) 40 41 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) 42 { 43 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 44 int ret = 0; 45 46 if (!pp_funcs->get_sclk) 47 return 0; 48 49 mutex_lock(&adev->pm.mutex); 50 ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle, 51 low); 52 mutex_unlock(&adev->pm.mutex); 53 54 return ret; 55 } 56 57 int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low) 58 { 59 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 60 int ret = 0; 61 62 if (!pp_funcs->get_mclk) 63 return 0; 64 65 mutex_lock(&adev->pm.mutex); 66 ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle, 67 low); 68 mutex_unlock(&adev->pm.mutex); 69 70 return ret; 71 } 72 73 int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate) 74 { 75 int ret = 0; 76 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 77 enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON; 78 79 if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) { 80 dev_dbg(adev->dev, "IP block%d already in the target %s state!", 81 block_type, gate ? "gate" : "ungate"); 82 return 0; 83 } 84 85 mutex_lock(&adev->pm.mutex); 86 87 switch (block_type) { 88 case AMD_IP_BLOCK_TYPE_UVD: 89 case AMD_IP_BLOCK_TYPE_VCE: 90 case AMD_IP_BLOCK_TYPE_GFX: 91 case AMD_IP_BLOCK_TYPE_VCN: 92 case AMD_IP_BLOCK_TYPE_SDMA: 93 case AMD_IP_BLOCK_TYPE_JPEG: 94 case AMD_IP_BLOCK_TYPE_GMC: 95 case AMD_IP_BLOCK_TYPE_ACP: 96 case AMD_IP_BLOCK_TYPE_VPE: 97 if (pp_funcs && pp_funcs->set_powergating_by_smu) 98 ret = (pp_funcs->set_powergating_by_smu( 99 (adev)->powerplay.pp_handle, block_type, gate)); 100 break; 101 default: 102 break; 103 } 104 105 if (!ret) 106 atomic_set(&adev->pm.pwr_state[block_type], pwr_state); 107 108 mutex_unlock(&adev->pm.mutex); 109 110 return ret; 111 } 112 113 int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev) 114 { 115 struct smu_context *smu = adev->powerplay.pp_handle; 116 int ret = -EOPNOTSUPP; 117 118 mutex_lock(&adev->pm.mutex); 119 ret = smu_set_gfx_power_up_by_imu(smu); 120 mutex_unlock(&adev->pm.mutex); 121 122 msleep(10); 123 124 return ret; 125 } 126 127 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev) 128 { 129 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 130 void *pp_handle = adev->powerplay.pp_handle; 131 int ret = 0; 132 133 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 134 return -ENOENT; 135 136 mutex_lock(&adev->pm.mutex); 137 138 /* enter BACO state */ 139 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 140 141 mutex_unlock(&adev->pm.mutex); 142 143 return ret; 144 } 145 146 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev) 147 { 148 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 149 void *pp_handle = adev->powerplay.pp_handle; 150 int ret = 0; 151 152 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 153 return -ENOENT; 154 155 mutex_lock(&adev->pm.mutex); 156 157 /* exit BACO state */ 158 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 159 160 mutex_unlock(&adev->pm.mutex); 161 162 return ret; 163 } 164 165 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, 166 enum pp_mp1_state mp1_state) 167 { 168 int ret = 0; 169 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 170 171 if (pp_funcs && pp_funcs->set_mp1_state) { 172 mutex_lock(&adev->pm.mutex); 173 174 ret = pp_funcs->set_mp1_state( 175 adev->powerplay.pp_handle, 176 mp1_state); 177 178 mutex_unlock(&adev->pm.mutex); 179 } 180 181 return ret; 182 } 183 184 int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en) 185 { 186 int ret = 0; 187 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 188 189 if (pp_funcs && pp_funcs->notify_rlc_state) { 190 mutex_lock(&adev->pm.mutex); 191 192 ret = pp_funcs->notify_rlc_state( 193 adev->powerplay.pp_handle, 194 en); 195 196 mutex_unlock(&adev->pm.mutex); 197 } 198 199 return ret; 200 } 201 202 bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) 203 { 204 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 205 void *pp_handle = adev->powerplay.pp_handle; 206 bool ret; 207 208 if (!pp_funcs || !pp_funcs->get_asic_baco_capability) 209 return false; 210 /* Don't use baco for reset in S3. 211 * This is a workaround for some platforms 212 * where entering BACO during suspend 213 * seems to cause reboots or hangs. 214 * This might be related to the fact that BACO controls 215 * power to the whole GPU including devices like audio and USB. 216 * Powering down/up everything may adversely affect these other 217 * devices. Needs more investigation. 218 */ 219 if (adev->in_s3) 220 return false; 221 222 mutex_lock(&adev->pm.mutex); 223 224 ret = pp_funcs->get_asic_baco_capability(pp_handle); 225 226 mutex_unlock(&adev->pm.mutex); 227 228 return ret; 229 } 230 231 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev) 232 { 233 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 234 void *pp_handle = adev->powerplay.pp_handle; 235 int ret = 0; 236 237 if (!pp_funcs || !pp_funcs->asic_reset_mode_2) 238 return -ENOENT; 239 240 mutex_lock(&adev->pm.mutex); 241 242 ret = pp_funcs->asic_reset_mode_2(pp_handle); 243 244 mutex_unlock(&adev->pm.mutex); 245 246 return ret; 247 } 248 249 int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev) 250 { 251 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 252 void *pp_handle = adev->powerplay.pp_handle; 253 int ret = 0; 254 255 if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features) 256 return -ENOENT; 257 258 mutex_lock(&adev->pm.mutex); 259 260 ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle); 261 262 mutex_unlock(&adev->pm.mutex); 263 264 return ret; 265 } 266 267 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev) 268 { 269 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 270 void *pp_handle = adev->powerplay.pp_handle; 271 int ret = 0; 272 273 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 274 return -ENOENT; 275 276 mutex_lock(&adev->pm.mutex); 277 278 /* enter BACO state */ 279 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 280 if (ret) 281 goto out; 282 283 /* exit BACO state */ 284 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 285 286 out: 287 mutex_unlock(&adev->pm.mutex); 288 return ret; 289 } 290 291 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev) 292 { 293 struct smu_context *smu = adev->powerplay.pp_handle; 294 bool support_mode1_reset = false; 295 296 if (is_support_sw_smu(adev)) { 297 mutex_lock(&adev->pm.mutex); 298 support_mode1_reset = smu_mode1_reset_is_support(smu); 299 mutex_unlock(&adev->pm.mutex); 300 } 301 302 return support_mode1_reset; 303 } 304 305 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev) 306 { 307 struct smu_context *smu = adev->powerplay.pp_handle; 308 int ret = -EOPNOTSUPP; 309 310 if (is_support_sw_smu(adev)) { 311 mutex_lock(&adev->pm.mutex); 312 ret = smu_mode1_reset(smu); 313 mutex_unlock(&adev->pm.mutex); 314 } 315 316 return ret; 317 } 318 319 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, 320 enum PP_SMC_POWER_PROFILE type, 321 bool en) 322 { 323 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 324 int ret = 0; 325 326 if (amdgpu_sriov_vf(adev)) 327 return 0; 328 329 if (pp_funcs && pp_funcs->switch_power_profile) { 330 mutex_lock(&adev->pm.mutex); 331 ret = pp_funcs->switch_power_profile( 332 adev->powerplay.pp_handle, type, en); 333 mutex_unlock(&adev->pm.mutex); 334 } 335 336 return ret; 337 } 338 339 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, 340 uint32_t pstate) 341 { 342 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 343 int ret = 0; 344 345 if (pp_funcs && pp_funcs->set_xgmi_pstate) { 346 mutex_lock(&adev->pm.mutex); 347 ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle, 348 pstate); 349 mutex_unlock(&adev->pm.mutex); 350 } 351 352 return ret; 353 } 354 355 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev, 356 uint32_t cstate) 357 { 358 int ret = 0; 359 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 360 void *pp_handle = adev->powerplay.pp_handle; 361 362 if (pp_funcs && pp_funcs->set_df_cstate) { 363 mutex_lock(&adev->pm.mutex); 364 ret = pp_funcs->set_df_cstate(pp_handle, cstate); 365 mutex_unlock(&adev->pm.mutex); 366 } 367 368 return ret; 369 } 370 371 int amdgpu_dpm_get_xgmi_plpd_mode(struct amdgpu_device *adev, char **mode_desc) 372 { 373 struct smu_context *smu = adev->powerplay.pp_handle; 374 int mode = XGMI_PLPD_NONE; 375 376 if (is_support_sw_smu(adev)) { 377 mode = smu->plpd_mode; 378 if (mode_desc == NULL) 379 return mode; 380 switch (smu->plpd_mode) { 381 case XGMI_PLPD_DISALLOW: 382 *mode_desc = "disallow"; 383 break; 384 case XGMI_PLPD_DEFAULT: 385 *mode_desc = "default"; 386 break; 387 case XGMI_PLPD_OPTIMIZED: 388 *mode_desc = "optimized"; 389 break; 390 case XGMI_PLPD_NONE: 391 default: 392 *mode_desc = "none"; 393 break; 394 } 395 } 396 397 return mode; 398 } 399 400 int amdgpu_dpm_set_xgmi_plpd_mode(struct amdgpu_device *adev, int mode) 401 { 402 struct smu_context *smu = adev->powerplay.pp_handle; 403 int ret = -EOPNOTSUPP; 404 405 if (is_support_sw_smu(adev)) { 406 mutex_lock(&adev->pm.mutex); 407 ret = smu_set_xgmi_plpd_mode(smu, mode); 408 mutex_unlock(&adev->pm.mutex); 409 } 410 411 return ret; 412 } 413 414 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev) 415 { 416 void *pp_handle = adev->powerplay.pp_handle; 417 const struct amd_pm_funcs *pp_funcs = 418 adev->powerplay.pp_funcs; 419 int ret = 0; 420 421 if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) { 422 mutex_lock(&adev->pm.mutex); 423 ret = pp_funcs->enable_mgpu_fan_boost(pp_handle); 424 mutex_unlock(&adev->pm.mutex); 425 } 426 427 return ret; 428 } 429 430 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev, 431 uint32_t msg_id) 432 { 433 void *pp_handle = adev->powerplay.pp_handle; 434 const struct amd_pm_funcs *pp_funcs = 435 adev->powerplay.pp_funcs; 436 int ret = 0; 437 438 if (pp_funcs && pp_funcs->set_clockgating_by_smu) { 439 mutex_lock(&adev->pm.mutex); 440 ret = pp_funcs->set_clockgating_by_smu(pp_handle, 441 msg_id); 442 mutex_unlock(&adev->pm.mutex); 443 } 444 445 return ret; 446 } 447 448 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev, 449 bool acquire) 450 { 451 void *pp_handle = adev->powerplay.pp_handle; 452 const struct amd_pm_funcs *pp_funcs = 453 adev->powerplay.pp_funcs; 454 int ret = -EOPNOTSUPP; 455 456 if (pp_funcs && pp_funcs->smu_i2c_bus_access) { 457 mutex_lock(&adev->pm.mutex); 458 ret = pp_funcs->smu_i2c_bus_access(pp_handle, 459 acquire); 460 mutex_unlock(&adev->pm.mutex); 461 } 462 463 return ret; 464 } 465 466 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 467 { 468 if (adev->pm.dpm_enabled) { 469 mutex_lock(&adev->pm.mutex); 470 if (power_supply_is_system_supplied() > 0) 471 adev->pm.ac_power = true; 472 else 473 adev->pm.ac_power = false; 474 475 if (adev->powerplay.pp_funcs && 476 adev->powerplay.pp_funcs->enable_bapm) 477 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); 478 479 if (is_support_sw_smu(adev)) 480 smu_set_ac_dc(adev->powerplay.pp_handle); 481 482 mutex_unlock(&adev->pm.mutex); 483 } 484 } 485 486 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, 487 void *data, uint32_t *size) 488 { 489 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 490 int ret = -EINVAL; 491 492 if (!data || !size) 493 return -EINVAL; 494 495 if (pp_funcs && pp_funcs->read_sensor) { 496 mutex_lock(&adev->pm.mutex); 497 ret = pp_funcs->read_sensor(adev->powerplay.pp_handle, 498 sensor, 499 data, 500 size); 501 mutex_unlock(&adev->pm.mutex); 502 } 503 504 return ret; 505 } 506 507 int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit) 508 { 509 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 510 int ret = -EOPNOTSUPP; 511 512 if (pp_funcs && pp_funcs->get_apu_thermal_limit) { 513 mutex_lock(&adev->pm.mutex); 514 ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit); 515 mutex_unlock(&adev->pm.mutex); 516 } 517 518 return ret; 519 } 520 521 int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit) 522 { 523 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 524 int ret = -EOPNOTSUPP; 525 526 if (pp_funcs && pp_funcs->set_apu_thermal_limit) { 527 mutex_lock(&adev->pm.mutex); 528 ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit); 529 mutex_unlock(&adev->pm.mutex); 530 } 531 532 return ret; 533 } 534 535 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev) 536 { 537 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 538 int i; 539 540 if (!adev->pm.dpm_enabled) 541 return; 542 543 if (!pp_funcs->pm_compute_clocks) 544 return; 545 546 if (adev->mode_info.num_crtc) 547 amdgpu_display_bandwidth_update(adev); 548 549 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 550 struct amdgpu_ring *ring = adev->rings[i]; 551 if (ring && ring->sched.ready) 552 amdgpu_fence_wait_empty(ring); 553 } 554 555 mutex_lock(&adev->pm.mutex); 556 pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle); 557 mutex_unlock(&adev->pm.mutex); 558 } 559 560 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 561 { 562 int ret = 0; 563 564 if (adev->family == AMDGPU_FAMILY_SI) { 565 mutex_lock(&adev->pm.mutex); 566 if (enable) { 567 adev->pm.dpm.uvd_active = true; 568 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; 569 } else { 570 adev->pm.dpm.uvd_active = false; 571 } 572 mutex_unlock(&adev->pm.mutex); 573 574 amdgpu_dpm_compute_clocks(adev); 575 return; 576 } 577 578 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); 579 if (ret) 580 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 581 enable ? "enable" : "disable", ret); 582 } 583 584 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 585 { 586 int ret = 0; 587 588 if (adev->family == AMDGPU_FAMILY_SI) { 589 mutex_lock(&adev->pm.mutex); 590 if (enable) { 591 adev->pm.dpm.vce_active = true; 592 /* XXX select vce level based on ring/task */ 593 adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; 594 } else { 595 adev->pm.dpm.vce_active = false; 596 } 597 mutex_unlock(&adev->pm.mutex); 598 599 amdgpu_dpm_compute_clocks(adev); 600 return; 601 } 602 603 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); 604 if (ret) 605 DRM_ERROR("Dpm %s vce failed, ret = %d. \n", 606 enable ? "enable" : "disable", ret); 607 } 608 609 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) 610 { 611 int ret = 0; 612 613 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable); 614 if (ret) 615 DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", 616 enable ? "enable" : "disable", ret); 617 } 618 619 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) 620 { 621 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 622 int r = 0; 623 624 if (!pp_funcs || !pp_funcs->load_firmware) 625 return 0; 626 627 mutex_lock(&adev->pm.mutex); 628 r = pp_funcs->load_firmware(adev->powerplay.pp_handle); 629 if (r) { 630 pr_err("smu firmware loading failed\n"); 631 goto out; 632 } 633 634 if (smu_version) 635 *smu_version = adev->pm.fw_version; 636 637 out: 638 mutex_unlock(&adev->pm.mutex); 639 return r; 640 } 641 642 int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable) 643 { 644 int ret = 0; 645 646 if (is_support_sw_smu(adev)) { 647 mutex_lock(&adev->pm.mutex); 648 ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle, 649 enable); 650 mutex_unlock(&adev->pm.mutex); 651 } 652 653 return ret; 654 } 655 656 int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size) 657 { 658 struct smu_context *smu = adev->powerplay.pp_handle; 659 int ret = 0; 660 661 if (!is_support_sw_smu(adev)) 662 return -EOPNOTSUPP; 663 664 mutex_lock(&adev->pm.mutex); 665 ret = smu_send_hbm_bad_pages_num(smu, size); 666 mutex_unlock(&adev->pm.mutex); 667 668 return ret; 669 } 670 671 int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size) 672 { 673 struct smu_context *smu = adev->powerplay.pp_handle; 674 int ret = 0; 675 676 if (!is_support_sw_smu(adev)) 677 return -EOPNOTSUPP; 678 679 mutex_lock(&adev->pm.mutex); 680 ret = smu_send_hbm_bad_channel_flag(smu, size); 681 mutex_unlock(&adev->pm.mutex); 682 683 return ret; 684 } 685 686 int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, 687 enum pp_clock_type type, 688 uint32_t *min, 689 uint32_t *max) 690 { 691 int ret = 0; 692 693 if (type != PP_SCLK) 694 return -EINVAL; 695 696 if (!is_support_sw_smu(adev)) 697 return -EOPNOTSUPP; 698 699 mutex_lock(&adev->pm.mutex); 700 ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle, 701 SMU_SCLK, 702 min, 703 max); 704 mutex_unlock(&adev->pm.mutex); 705 706 return ret; 707 } 708 709 int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, 710 enum pp_clock_type type, 711 uint32_t min, 712 uint32_t max) 713 { 714 struct smu_context *smu = adev->powerplay.pp_handle; 715 int ret = 0; 716 717 if (type != PP_SCLK) 718 return -EINVAL; 719 720 if (!is_support_sw_smu(adev)) 721 return -EOPNOTSUPP; 722 723 mutex_lock(&adev->pm.mutex); 724 ret = smu_set_soft_freq_range(smu, 725 SMU_SCLK, 726 min, 727 max); 728 mutex_unlock(&adev->pm.mutex); 729 730 return ret; 731 } 732 733 int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev) 734 { 735 struct smu_context *smu = adev->powerplay.pp_handle; 736 int ret = 0; 737 738 if (!is_support_sw_smu(adev)) 739 return 0; 740 741 mutex_lock(&adev->pm.mutex); 742 ret = smu_write_watermarks_table(smu); 743 mutex_unlock(&adev->pm.mutex); 744 745 return ret; 746 } 747 748 int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, 749 enum smu_event_type event, 750 uint64_t event_arg) 751 { 752 struct smu_context *smu = adev->powerplay.pp_handle; 753 int ret = 0; 754 755 if (!is_support_sw_smu(adev)) 756 return -EOPNOTSUPP; 757 758 mutex_lock(&adev->pm.mutex); 759 ret = smu_wait_for_event(smu, event, event_arg); 760 mutex_unlock(&adev->pm.mutex); 761 762 return ret; 763 } 764 765 int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value) 766 { 767 struct smu_context *smu = adev->powerplay.pp_handle; 768 int ret = 0; 769 770 if (!is_support_sw_smu(adev)) 771 return -EOPNOTSUPP; 772 773 mutex_lock(&adev->pm.mutex); 774 ret = smu_set_residency_gfxoff(smu, value); 775 mutex_unlock(&adev->pm.mutex); 776 777 return ret; 778 } 779 780 int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value) 781 { 782 struct smu_context *smu = adev->powerplay.pp_handle; 783 int ret = 0; 784 785 if (!is_support_sw_smu(adev)) 786 return -EOPNOTSUPP; 787 788 mutex_lock(&adev->pm.mutex); 789 ret = smu_get_residency_gfxoff(smu, value); 790 mutex_unlock(&adev->pm.mutex); 791 792 return ret; 793 } 794 795 int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value) 796 { 797 struct smu_context *smu = adev->powerplay.pp_handle; 798 int ret = 0; 799 800 if (!is_support_sw_smu(adev)) 801 return -EOPNOTSUPP; 802 803 mutex_lock(&adev->pm.mutex); 804 ret = smu_get_entrycount_gfxoff(smu, value); 805 mutex_unlock(&adev->pm.mutex); 806 807 return ret; 808 } 809 810 int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value) 811 { 812 struct smu_context *smu = adev->powerplay.pp_handle; 813 int ret = 0; 814 815 if (!is_support_sw_smu(adev)) 816 return -EOPNOTSUPP; 817 818 mutex_lock(&adev->pm.mutex); 819 ret = smu_get_status_gfxoff(smu, value); 820 mutex_unlock(&adev->pm.mutex); 821 822 return ret; 823 } 824 825 uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev) 826 { 827 struct smu_context *smu = adev->powerplay.pp_handle; 828 829 if (!is_support_sw_smu(adev)) 830 return 0; 831 832 return atomic64_read(&smu->throttle_int_counter); 833 } 834 835 /* amdgpu_dpm_gfx_state_change - Handle gfx power state change set 836 * @adev: amdgpu_device pointer 837 * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry) 838 * 839 */ 840 void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev, 841 enum gfx_change_state state) 842 { 843 mutex_lock(&adev->pm.mutex); 844 if (adev->powerplay.pp_funcs && 845 adev->powerplay.pp_funcs->gfx_state_change_set) 846 ((adev)->powerplay.pp_funcs->gfx_state_change_set( 847 (adev)->powerplay.pp_handle, state)); 848 mutex_unlock(&adev->pm.mutex); 849 } 850 851 int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, 852 void *umc_ecc) 853 { 854 struct smu_context *smu = adev->powerplay.pp_handle; 855 int ret = 0; 856 857 if (!is_support_sw_smu(adev)) 858 return -EOPNOTSUPP; 859 860 mutex_lock(&adev->pm.mutex); 861 ret = smu_get_ecc_info(smu, umc_ecc); 862 mutex_unlock(&adev->pm.mutex); 863 864 return ret; 865 } 866 867 struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev, 868 uint32_t idx) 869 { 870 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 871 struct amd_vce_state *vstate = NULL; 872 873 if (!pp_funcs->get_vce_clock_state) 874 return NULL; 875 876 mutex_lock(&adev->pm.mutex); 877 vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle, 878 idx); 879 mutex_unlock(&adev->pm.mutex); 880 881 return vstate; 882 } 883 884 void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, 885 enum amd_pm_state_type *state) 886 { 887 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 888 889 mutex_lock(&adev->pm.mutex); 890 891 if (!pp_funcs->get_current_power_state) { 892 *state = adev->pm.dpm.user_state; 893 goto out; 894 } 895 896 *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle); 897 if (*state < POWER_STATE_TYPE_DEFAULT || 898 *state > POWER_STATE_TYPE_INTERNAL_3DPERF) 899 *state = adev->pm.dpm.user_state; 900 901 out: 902 mutex_unlock(&adev->pm.mutex); 903 } 904 905 void amdgpu_dpm_set_power_state(struct amdgpu_device *adev, 906 enum amd_pm_state_type state) 907 { 908 mutex_lock(&adev->pm.mutex); 909 adev->pm.dpm.user_state = state; 910 mutex_unlock(&adev->pm.mutex); 911 912 if (is_support_sw_smu(adev)) 913 return; 914 915 if (amdgpu_dpm_dispatch_task(adev, 916 AMD_PP_TASK_ENABLE_USER_STATE, 917 &state) == -EOPNOTSUPP) 918 amdgpu_dpm_compute_clocks(adev); 919 } 920 921 enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev) 922 { 923 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 924 enum amd_dpm_forced_level level; 925 926 if (!pp_funcs) 927 return AMD_DPM_FORCED_LEVEL_AUTO; 928 929 mutex_lock(&adev->pm.mutex); 930 if (pp_funcs->get_performance_level) 931 level = pp_funcs->get_performance_level(adev->powerplay.pp_handle); 932 else 933 level = adev->pm.dpm.forced_level; 934 mutex_unlock(&adev->pm.mutex); 935 936 return level; 937 } 938 939 int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev, 940 enum amd_dpm_forced_level level) 941 { 942 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 943 enum amd_dpm_forced_level current_level; 944 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 945 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 946 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 947 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 948 949 if (!pp_funcs || !pp_funcs->force_performance_level) 950 return 0; 951 952 if (adev->pm.dpm.thermal_active) 953 return -EINVAL; 954 955 current_level = amdgpu_dpm_get_performance_level(adev); 956 if (current_level == level) 957 return 0; 958 959 if (adev->asic_type == CHIP_RAVEN) { 960 if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) { 961 if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && 962 level == AMD_DPM_FORCED_LEVEL_MANUAL) 963 amdgpu_gfx_off_ctrl(adev, false); 964 else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && 965 level != AMD_DPM_FORCED_LEVEL_MANUAL) 966 amdgpu_gfx_off_ctrl(adev, true); 967 } 968 } 969 970 if (!(current_level & profile_mode_mask) && 971 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) 972 return -EINVAL; 973 974 if (!(current_level & profile_mode_mask) && 975 (level & profile_mode_mask)) { 976 /* enter UMD Pstate */ 977 amdgpu_device_ip_set_powergating_state(adev, 978 AMD_IP_BLOCK_TYPE_GFX, 979 AMD_PG_STATE_UNGATE); 980 amdgpu_device_ip_set_clockgating_state(adev, 981 AMD_IP_BLOCK_TYPE_GFX, 982 AMD_CG_STATE_UNGATE); 983 } else if ((current_level & profile_mode_mask) && 984 !(level & profile_mode_mask)) { 985 /* exit UMD Pstate */ 986 amdgpu_device_ip_set_clockgating_state(adev, 987 AMD_IP_BLOCK_TYPE_GFX, 988 AMD_CG_STATE_GATE); 989 amdgpu_device_ip_set_powergating_state(adev, 990 AMD_IP_BLOCK_TYPE_GFX, 991 AMD_PG_STATE_GATE); 992 } 993 994 mutex_lock(&adev->pm.mutex); 995 996 if (pp_funcs->force_performance_level(adev->powerplay.pp_handle, 997 level)) { 998 mutex_unlock(&adev->pm.mutex); 999 return -EINVAL; 1000 } 1001 1002 adev->pm.dpm.forced_level = level; 1003 1004 mutex_unlock(&adev->pm.mutex); 1005 1006 return 0; 1007 } 1008 1009 int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev, 1010 struct pp_states_info *states) 1011 { 1012 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1013 int ret = 0; 1014 1015 if (!pp_funcs->get_pp_num_states) 1016 return -EOPNOTSUPP; 1017 1018 mutex_lock(&adev->pm.mutex); 1019 ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle, 1020 states); 1021 mutex_unlock(&adev->pm.mutex); 1022 1023 return ret; 1024 } 1025 1026 int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev, 1027 enum amd_pp_task task_id, 1028 enum amd_pm_state_type *user_state) 1029 { 1030 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1031 int ret = 0; 1032 1033 if (!pp_funcs->dispatch_tasks) 1034 return -EOPNOTSUPP; 1035 1036 mutex_lock(&adev->pm.mutex); 1037 ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle, 1038 task_id, 1039 user_state); 1040 mutex_unlock(&adev->pm.mutex); 1041 1042 return ret; 1043 } 1044 1045 int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table) 1046 { 1047 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1048 int ret = 0; 1049 1050 if (!pp_funcs->get_pp_table) 1051 return 0; 1052 1053 mutex_lock(&adev->pm.mutex); 1054 ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle, 1055 table); 1056 mutex_unlock(&adev->pm.mutex); 1057 1058 return ret; 1059 } 1060 1061 int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev, 1062 uint32_t type, 1063 long *input, 1064 uint32_t size) 1065 { 1066 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1067 int ret = 0; 1068 1069 if (!pp_funcs->set_fine_grain_clk_vol) 1070 return 0; 1071 1072 mutex_lock(&adev->pm.mutex); 1073 ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle, 1074 type, 1075 input, 1076 size); 1077 mutex_unlock(&adev->pm.mutex); 1078 1079 return ret; 1080 } 1081 1082 int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev, 1083 uint32_t type, 1084 long *input, 1085 uint32_t size) 1086 { 1087 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1088 int ret = 0; 1089 1090 if (!pp_funcs->odn_edit_dpm_table) 1091 return 0; 1092 1093 mutex_lock(&adev->pm.mutex); 1094 ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle, 1095 type, 1096 input, 1097 size); 1098 mutex_unlock(&adev->pm.mutex); 1099 1100 return ret; 1101 } 1102 1103 int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev, 1104 enum pp_clock_type type, 1105 char *buf) 1106 { 1107 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1108 int ret = 0; 1109 1110 if (!pp_funcs->print_clock_levels) 1111 return 0; 1112 1113 mutex_lock(&adev->pm.mutex); 1114 ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle, 1115 type, 1116 buf); 1117 mutex_unlock(&adev->pm.mutex); 1118 1119 return ret; 1120 } 1121 1122 int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev, 1123 enum pp_clock_type type, 1124 char *buf, 1125 int *offset) 1126 { 1127 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1128 int ret = 0; 1129 1130 if (!pp_funcs->emit_clock_levels) 1131 return -ENOENT; 1132 1133 mutex_lock(&adev->pm.mutex); 1134 ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle, 1135 type, 1136 buf, 1137 offset); 1138 mutex_unlock(&adev->pm.mutex); 1139 1140 return ret; 1141 } 1142 1143 int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev, 1144 uint64_t ppfeature_masks) 1145 { 1146 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1147 int ret = 0; 1148 1149 if (!pp_funcs->set_ppfeature_status) 1150 return 0; 1151 1152 mutex_lock(&adev->pm.mutex); 1153 ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle, 1154 ppfeature_masks); 1155 mutex_unlock(&adev->pm.mutex); 1156 1157 return ret; 1158 } 1159 1160 int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf) 1161 { 1162 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1163 int ret = 0; 1164 1165 if (!pp_funcs->get_ppfeature_status) 1166 return 0; 1167 1168 mutex_lock(&adev->pm.mutex); 1169 ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle, 1170 buf); 1171 mutex_unlock(&adev->pm.mutex); 1172 1173 return ret; 1174 } 1175 1176 int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev, 1177 enum pp_clock_type type, 1178 uint32_t mask) 1179 { 1180 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1181 int ret = 0; 1182 1183 if (!pp_funcs->force_clock_level) 1184 return 0; 1185 1186 mutex_lock(&adev->pm.mutex); 1187 ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle, 1188 type, 1189 mask); 1190 mutex_unlock(&adev->pm.mutex); 1191 1192 return ret; 1193 } 1194 1195 int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev) 1196 { 1197 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1198 int ret = 0; 1199 1200 if (!pp_funcs->get_sclk_od) 1201 return -EOPNOTSUPP; 1202 1203 mutex_lock(&adev->pm.mutex); 1204 ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle); 1205 mutex_unlock(&adev->pm.mutex); 1206 1207 return ret; 1208 } 1209 1210 int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) 1211 { 1212 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1213 1214 if (is_support_sw_smu(adev)) 1215 return -EOPNOTSUPP; 1216 1217 mutex_lock(&adev->pm.mutex); 1218 if (pp_funcs->set_sclk_od) 1219 pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value); 1220 mutex_unlock(&adev->pm.mutex); 1221 1222 if (amdgpu_dpm_dispatch_task(adev, 1223 AMD_PP_TASK_READJUST_POWER_STATE, 1224 NULL) == -EOPNOTSUPP) { 1225 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1226 amdgpu_dpm_compute_clocks(adev); 1227 } 1228 1229 return 0; 1230 } 1231 1232 int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev) 1233 { 1234 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1235 int ret = 0; 1236 1237 if (!pp_funcs->get_mclk_od) 1238 return -EOPNOTSUPP; 1239 1240 mutex_lock(&adev->pm.mutex); 1241 ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle); 1242 mutex_unlock(&adev->pm.mutex); 1243 1244 return ret; 1245 } 1246 1247 int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) 1248 { 1249 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1250 1251 if (is_support_sw_smu(adev)) 1252 return -EOPNOTSUPP; 1253 1254 mutex_lock(&adev->pm.mutex); 1255 if (pp_funcs->set_mclk_od) 1256 pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value); 1257 mutex_unlock(&adev->pm.mutex); 1258 1259 if (amdgpu_dpm_dispatch_task(adev, 1260 AMD_PP_TASK_READJUST_POWER_STATE, 1261 NULL) == -EOPNOTSUPP) { 1262 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1263 amdgpu_dpm_compute_clocks(adev); 1264 } 1265 1266 return 0; 1267 } 1268 1269 int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev, 1270 char *buf) 1271 { 1272 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1273 int ret = 0; 1274 1275 if (!pp_funcs->get_power_profile_mode) 1276 return -EOPNOTSUPP; 1277 1278 mutex_lock(&adev->pm.mutex); 1279 ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle, 1280 buf); 1281 mutex_unlock(&adev->pm.mutex); 1282 1283 return ret; 1284 } 1285 1286 int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev, 1287 long *input, uint32_t size) 1288 { 1289 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1290 int ret = 0; 1291 1292 if (!pp_funcs->set_power_profile_mode) 1293 return 0; 1294 1295 mutex_lock(&adev->pm.mutex); 1296 ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle, 1297 input, 1298 size); 1299 mutex_unlock(&adev->pm.mutex); 1300 1301 return ret; 1302 } 1303 1304 int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table) 1305 { 1306 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1307 int ret = 0; 1308 1309 if (!pp_funcs->get_gpu_metrics) 1310 return 0; 1311 1312 mutex_lock(&adev->pm.mutex); 1313 ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle, 1314 table); 1315 mutex_unlock(&adev->pm.mutex); 1316 1317 return ret; 1318 } 1319 1320 ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics, 1321 size_t size) 1322 { 1323 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1324 int ret = 0; 1325 1326 if (!pp_funcs->get_pm_metrics) 1327 return -EOPNOTSUPP; 1328 1329 mutex_lock(&adev->pm.mutex); 1330 ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics, 1331 size); 1332 mutex_unlock(&adev->pm.mutex); 1333 1334 return ret; 1335 } 1336 1337 int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev, 1338 uint32_t *fan_mode) 1339 { 1340 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1341 int ret = 0; 1342 1343 if (!pp_funcs->get_fan_control_mode) 1344 return -EOPNOTSUPP; 1345 1346 mutex_lock(&adev->pm.mutex); 1347 ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle, 1348 fan_mode); 1349 mutex_unlock(&adev->pm.mutex); 1350 1351 return ret; 1352 } 1353 1354 int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev, 1355 uint32_t speed) 1356 { 1357 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1358 int ret = 0; 1359 1360 if (!pp_funcs->set_fan_speed_pwm) 1361 return -EOPNOTSUPP; 1362 1363 mutex_lock(&adev->pm.mutex); 1364 ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle, 1365 speed); 1366 mutex_unlock(&adev->pm.mutex); 1367 1368 return ret; 1369 } 1370 1371 int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev, 1372 uint32_t *speed) 1373 { 1374 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1375 int ret = 0; 1376 1377 if (!pp_funcs->get_fan_speed_pwm) 1378 return -EOPNOTSUPP; 1379 1380 mutex_lock(&adev->pm.mutex); 1381 ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle, 1382 speed); 1383 mutex_unlock(&adev->pm.mutex); 1384 1385 return ret; 1386 } 1387 1388 int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev, 1389 uint32_t *speed) 1390 { 1391 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1392 int ret = 0; 1393 1394 if (!pp_funcs->get_fan_speed_rpm) 1395 return -EOPNOTSUPP; 1396 1397 mutex_lock(&adev->pm.mutex); 1398 ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle, 1399 speed); 1400 mutex_unlock(&adev->pm.mutex); 1401 1402 return ret; 1403 } 1404 1405 int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev, 1406 uint32_t speed) 1407 { 1408 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1409 int ret = 0; 1410 1411 if (!pp_funcs->set_fan_speed_rpm) 1412 return -EOPNOTSUPP; 1413 1414 mutex_lock(&adev->pm.mutex); 1415 ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle, 1416 speed); 1417 mutex_unlock(&adev->pm.mutex); 1418 1419 return ret; 1420 } 1421 1422 int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev, 1423 uint32_t mode) 1424 { 1425 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1426 int ret = 0; 1427 1428 if (!pp_funcs->set_fan_control_mode) 1429 return -EOPNOTSUPP; 1430 1431 mutex_lock(&adev->pm.mutex); 1432 ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle, 1433 mode); 1434 mutex_unlock(&adev->pm.mutex); 1435 1436 return ret; 1437 } 1438 1439 int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, 1440 uint32_t *limit, 1441 enum pp_power_limit_level pp_limit_level, 1442 enum pp_power_type power_type) 1443 { 1444 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1445 int ret = 0; 1446 1447 if (!pp_funcs->get_power_limit) 1448 return -ENODATA; 1449 1450 mutex_lock(&adev->pm.mutex); 1451 ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle, 1452 limit, 1453 pp_limit_level, 1454 power_type); 1455 mutex_unlock(&adev->pm.mutex); 1456 1457 return ret; 1458 } 1459 1460 int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, 1461 uint32_t limit) 1462 { 1463 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1464 int ret = 0; 1465 1466 if (!pp_funcs->set_power_limit) 1467 return -EINVAL; 1468 1469 mutex_lock(&adev->pm.mutex); 1470 ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle, 1471 limit); 1472 mutex_unlock(&adev->pm.mutex); 1473 1474 return ret; 1475 } 1476 1477 int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev) 1478 { 1479 bool cclk_dpm_supported = false; 1480 1481 if (!is_support_sw_smu(adev)) 1482 return false; 1483 1484 mutex_lock(&adev->pm.mutex); 1485 cclk_dpm_supported = is_support_cclk_dpm(adev); 1486 mutex_unlock(&adev->pm.mutex); 1487 1488 return (int)cclk_dpm_supported; 1489 } 1490 1491 int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 1492 struct seq_file *m) 1493 { 1494 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1495 1496 if (!pp_funcs->debugfs_print_current_performance_level) 1497 return -EOPNOTSUPP; 1498 1499 mutex_lock(&adev->pm.mutex); 1500 pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle, 1501 m); 1502 mutex_unlock(&adev->pm.mutex); 1503 1504 return 0; 1505 } 1506 1507 int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, 1508 void **addr, 1509 size_t *size) 1510 { 1511 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1512 int ret = 0; 1513 1514 if (!pp_funcs->get_smu_prv_buf_details) 1515 return -ENOSYS; 1516 1517 mutex_lock(&adev->pm.mutex); 1518 ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle, 1519 addr, 1520 size); 1521 mutex_unlock(&adev->pm.mutex); 1522 1523 return ret; 1524 } 1525 1526 int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) 1527 { 1528 if (is_support_sw_smu(adev)) { 1529 struct smu_context *smu = adev->powerplay.pp_handle; 1530 1531 return (smu->od_enabled || smu->is_apu); 1532 } else { 1533 struct pp_hwmgr *hwmgr; 1534 1535 /* 1536 * dpm on some legacy asics don't carry od_enabled member 1537 * as its pp_handle is casted directly from adev. 1538 */ 1539 if (amdgpu_dpm_is_legacy_dpm(adev)) 1540 return false; 1541 1542 hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle; 1543 1544 return hwmgr->od_enabled; 1545 } 1546 } 1547 1548 int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, 1549 const char *buf, 1550 size_t size) 1551 { 1552 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1553 int ret = 0; 1554 1555 if (!pp_funcs->set_pp_table) 1556 return -EOPNOTSUPP; 1557 1558 mutex_lock(&adev->pm.mutex); 1559 ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle, 1560 buf, 1561 size); 1562 mutex_unlock(&adev->pm.mutex); 1563 1564 return ret; 1565 } 1566 1567 int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev) 1568 { 1569 struct smu_context *smu = adev->powerplay.pp_handle; 1570 1571 if (!is_support_sw_smu(adev)) 1572 return INT_MAX; 1573 1574 return smu->cpu_core_num; 1575 } 1576 1577 void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev) 1578 { 1579 if (!is_support_sw_smu(adev)) 1580 return; 1581 1582 amdgpu_smu_stb_debug_fs_init(adev); 1583 } 1584 1585 int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev, 1586 const struct amd_pp_display_configuration *input) 1587 { 1588 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1589 int ret = 0; 1590 1591 if (!pp_funcs->display_configuration_change) 1592 return 0; 1593 1594 mutex_lock(&adev->pm.mutex); 1595 ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle, 1596 input); 1597 mutex_unlock(&adev->pm.mutex); 1598 1599 return ret; 1600 } 1601 1602 int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev, 1603 enum amd_pp_clock_type type, 1604 struct amd_pp_clocks *clocks) 1605 { 1606 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1607 int ret = 0; 1608 1609 if (!pp_funcs->get_clock_by_type) 1610 return 0; 1611 1612 mutex_lock(&adev->pm.mutex); 1613 ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle, 1614 type, 1615 clocks); 1616 mutex_unlock(&adev->pm.mutex); 1617 1618 return ret; 1619 } 1620 1621 int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev, 1622 struct amd_pp_simple_clock_info *clocks) 1623 { 1624 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1625 int ret = 0; 1626 1627 if (!pp_funcs->get_display_mode_validation_clocks) 1628 return 0; 1629 1630 mutex_lock(&adev->pm.mutex); 1631 ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle, 1632 clocks); 1633 mutex_unlock(&adev->pm.mutex); 1634 1635 return ret; 1636 } 1637 1638 int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev, 1639 enum amd_pp_clock_type type, 1640 struct pp_clock_levels_with_latency *clocks) 1641 { 1642 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1643 int ret = 0; 1644 1645 if (!pp_funcs->get_clock_by_type_with_latency) 1646 return 0; 1647 1648 mutex_lock(&adev->pm.mutex); 1649 ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle, 1650 type, 1651 clocks); 1652 mutex_unlock(&adev->pm.mutex); 1653 1654 return ret; 1655 } 1656 1657 int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev, 1658 enum amd_pp_clock_type type, 1659 struct pp_clock_levels_with_voltage *clocks) 1660 { 1661 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1662 int ret = 0; 1663 1664 if (!pp_funcs->get_clock_by_type_with_voltage) 1665 return 0; 1666 1667 mutex_lock(&adev->pm.mutex); 1668 ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle, 1669 type, 1670 clocks); 1671 mutex_unlock(&adev->pm.mutex); 1672 1673 return ret; 1674 } 1675 1676 int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev, 1677 void *clock_ranges) 1678 { 1679 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1680 int ret = 0; 1681 1682 if (!pp_funcs->set_watermarks_for_clocks_ranges) 1683 return -EOPNOTSUPP; 1684 1685 mutex_lock(&adev->pm.mutex); 1686 ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle, 1687 clock_ranges); 1688 mutex_unlock(&adev->pm.mutex); 1689 1690 return ret; 1691 } 1692 1693 int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev, 1694 struct pp_display_clock_request *clock) 1695 { 1696 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1697 int ret = 0; 1698 1699 if (!pp_funcs->display_clock_voltage_request) 1700 return -EOPNOTSUPP; 1701 1702 mutex_lock(&adev->pm.mutex); 1703 ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle, 1704 clock); 1705 mutex_unlock(&adev->pm.mutex); 1706 1707 return ret; 1708 } 1709 1710 int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev, 1711 struct amd_pp_clock_info *clocks) 1712 { 1713 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1714 int ret = 0; 1715 1716 if (!pp_funcs->get_current_clocks) 1717 return -EOPNOTSUPP; 1718 1719 mutex_lock(&adev->pm.mutex); 1720 ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle, 1721 clocks); 1722 mutex_unlock(&adev->pm.mutex); 1723 1724 return ret; 1725 } 1726 1727 void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev) 1728 { 1729 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1730 1731 if (!pp_funcs->notify_smu_enable_pwe) 1732 return; 1733 1734 mutex_lock(&adev->pm.mutex); 1735 pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle); 1736 mutex_unlock(&adev->pm.mutex); 1737 } 1738 1739 int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev, 1740 uint32_t count) 1741 { 1742 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1743 int ret = 0; 1744 1745 if (!pp_funcs->set_active_display_count) 1746 return -EOPNOTSUPP; 1747 1748 mutex_lock(&adev->pm.mutex); 1749 ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle, 1750 count); 1751 mutex_unlock(&adev->pm.mutex); 1752 1753 return ret; 1754 } 1755 1756 int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev, 1757 uint32_t clock) 1758 { 1759 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1760 int ret = 0; 1761 1762 if (!pp_funcs->set_min_deep_sleep_dcefclk) 1763 return -EOPNOTSUPP; 1764 1765 mutex_lock(&adev->pm.mutex); 1766 ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle, 1767 clock); 1768 mutex_unlock(&adev->pm.mutex); 1769 1770 return ret; 1771 } 1772 1773 void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev, 1774 uint32_t clock) 1775 { 1776 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1777 1778 if (!pp_funcs->set_hard_min_dcefclk_by_freq) 1779 return; 1780 1781 mutex_lock(&adev->pm.mutex); 1782 pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle, 1783 clock); 1784 mutex_unlock(&adev->pm.mutex); 1785 } 1786 1787 void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev, 1788 uint32_t clock) 1789 { 1790 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1791 1792 if (!pp_funcs->set_hard_min_fclk_by_freq) 1793 return; 1794 1795 mutex_lock(&adev->pm.mutex); 1796 pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle, 1797 clock); 1798 mutex_unlock(&adev->pm.mutex); 1799 } 1800 1801 int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev, 1802 bool disable_memory_clock_switch) 1803 { 1804 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1805 int ret = 0; 1806 1807 if (!pp_funcs->display_disable_memory_clock_switch) 1808 return 0; 1809 1810 mutex_lock(&adev->pm.mutex); 1811 ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle, 1812 disable_memory_clock_switch); 1813 mutex_unlock(&adev->pm.mutex); 1814 1815 return ret; 1816 } 1817 1818 int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev, 1819 struct pp_smu_nv_clock_table *max_clocks) 1820 { 1821 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1822 int ret = 0; 1823 1824 if (!pp_funcs->get_max_sustainable_clocks_by_dc) 1825 return -EOPNOTSUPP; 1826 1827 mutex_lock(&adev->pm.mutex); 1828 ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle, 1829 max_clocks); 1830 mutex_unlock(&adev->pm.mutex); 1831 1832 return ret; 1833 } 1834 1835 enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev, 1836 unsigned int *clock_values_in_khz, 1837 unsigned int *num_states) 1838 { 1839 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1840 int ret = 0; 1841 1842 if (!pp_funcs->get_uclk_dpm_states) 1843 return -EOPNOTSUPP; 1844 1845 mutex_lock(&adev->pm.mutex); 1846 ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle, 1847 clock_values_in_khz, 1848 num_states); 1849 mutex_unlock(&adev->pm.mutex); 1850 1851 return ret; 1852 } 1853 1854 int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev, 1855 struct dpm_clocks *clock_table) 1856 { 1857 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1858 int ret = 0; 1859 1860 if (!pp_funcs->get_dpm_clock_table) 1861 return -EOPNOTSUPP; 1862 1863 mutex_lock(&adev->pm.mutex); 1864 ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle, 1865 clock_table); 1866 mutex_unlock(&adev->pm.mutex); 1867 1868 return ret; 1869 } 1870