1 /* 2 * Copyright 2011 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Alex Deucher 23 */ 24 25 #include "amdgpu.h" 26 #include "amdgpu_atombios.h" 27 #include "amdgpu_i2c.h" 28 #include "amdgpu_dpm.h" 29 #include "atom.h" 30 #include "amd_pcie.h" 31 #include "amdgpu_display.h" 32 #include "hwmgr.h" 33 #include <linux/power_supply.h> 34 #include "amdgpu_smu.h" 35 36 #define amdgpu_dpm_enable_bapm(adev, e) \ 37 ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) 38 39 #define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev)) 40 41 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) 42 { 43 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 44 int ret = 0; 45 46 if (!pp_funcs->get_sclk) 47 return 0; 48 49 mutex_lock(&adev->pm.mutex); 50 ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle, 51 low); 52 mutex_unlock(&adev->pm.mutex); 53 54 return ret; 55 } 56 57 int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low) 58 { 59 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 60 int ret = 0; 61 62 if (!pp_funcs->get_mclk) 63 return 0; 64 65 mutex_lock(&adev->pm.mutex); 66 ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle, 67 low); 68 mutex_unlock(&adev->pm.mutex); 69 70 return ret; 71 } 72 73 int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate) 74 { 75 int ret = 0; 76 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 77 enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON; 78 79 if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) { 80 dev_dbg(adev->dev, "IP block%d already in the target %s state!", 81 block_type, gate ? "gate" : "ungate"); 82 return 0; 83 } 84 85 mutex_lock(&adev->pm.mutex); 86 87 switch (block_type) { 88 case AMD_IP_BLOCK_TYPE_UVD: 89 case AMD_IP_BLOCK_TYPE_VCE: 90 case AMD_IP_BLOCK_TYPE_GFX: 91 case AMD_IP_BLOCK_TYPE_VCN: 92 case AMD_IP_BLOCK_TYPE_SDMA: 93 case AMD_IP_BLOCK_TYPE_JPEG: 94 case AMD_IP_BLOCK_TYPE_GMC: 95 case AMD_IP_BLOCK_TYPE_ACP: 96 case AMD_IP_BLOCK_TYPE_VPE: 97 if (pp_funcs && pp_funcs->set_powergating_by_smu) 98 ret = (pp_funcs->set_powergating_by_smu( 99 (adev)->powerplay.pp_handle, block_type, gate)); 100 break; 101 default: 102 break; 103 } 104 105 if (!ret) 106 atomic_set(&adev->pm.pwr_state[block_type], pwr_state); 107 108 mutex_unlock(&adev->pm.mutex); 109 110 return ret; 111 } 112 113 int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev) 114 { 115 struct smu_context *smu = adev->powerplay.pp_handle; 116 int ret = -EOPNOTSUPP; 117 118 mutex_lock(&adev->pm.mutex); 119 ret = smu_set_gfx_power_up_by_imu(smu); 120 mutex_unlock(&adev->pm.mutex); 121 122 msleep(10); 123 124 return ret; 125 } 126 127 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev) 128 { 129 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 130 void *pp_handle = adev->powerplay.pp_handle; 131 int ret = 0; 132 133 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 134 return -ENOENT; 135 136 mutex_lock(&adev->pm.mutex); 137 138 /* enter BACO state */ 139 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 140 141 mutex_unlock(&adev->pm.mutex); 142 143 return ret; 144 } 145 146 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev) 147 { 148 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 149 void *pp_handle = adev->powerplay.pp_handle; 150 int ret = 0; 151 152 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 153 return -ENOENT; 154 155 mutex_lock(&adev->pm.mutex); 156 157 /* exit BACO state */ 158 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 159 160 mutex_unlock(&adev->pm.mutex); 161 162 return ret; 163 } 164 165 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, 166 enum pp_mp1_state mp1_state) 167 { 168 int ret = 0; 169 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 170 171 if (pp_funcs && pp_funcs->set_mp1_state) { 172 mutex_lock(&adev->pm.mutex); 173 174 ret = pp_funcs->set_mp1_state( 175 adev->powerplay.pp_handle, 176 mp1_state); 177 178 mutex_unlock(&adev->pm.mutex); 179 } 180 181 return ret; 182 } 183 184 int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en) 185 { 186 int ret = 0; 187 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 188 189 if (pp_funcs && pp_funcs->notify_rlc_state) { 190 mutex_lock(&adev->pm.mutex); 191 192 ret = pp_funcs->notify_rlc_state( 193 adev->powerplay.pp_handle, 194 en); 195 196 mutex_unlock(&adev->pm.mutex); 197 } 198 199 return ret; 200 } 201 202 bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) 203 { 204 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 205 void *pp_handle = adev->powerplay.pp_handle; 206 bool ret; 207 208 if (!pp_funcs || !pp_funcs->get_asic_baco_capability) 209 return false; 210 /* Don't use baco for reset in S3. 211 * This is a workaround for some platforms 212 * where entering BACO during suspend 213 * seems to cause reboots or hangs. 214 * This might be related to the fact that BACO controls 215 * power to the whole GPU including devices like audio and USB. 216 * Powering down/up everything may adversely affect these other 217 * devices. Needs more investigation. 218 */ 219 if (adev->in_s3) 220 return false; 221 222 mutex_lock(&adev->pm.mutex); 223 224 ret = pp_funcs->get_asic_baco_capability(pp_handle); 225 226 mutex_unlock(&adev->pm.mutex); 227 228 return ret; 229 } 230 231 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev) 232 { 233 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 234 void *pp_handle = adev->powerplay.pp_handle; 235 int ret = 0; 236 237 if (!pp_funcs || !pp_funcs->asic_reset_mode_2) 238 return -ENOENT; 239 240 mutex_lock(&adev->pm.mutex); 241 242 ret = pp_funcs->asic_reset_mode_2(pp_handle); 243 244 mutex_unlock(&adev->pm.mutex); 245 246 return ret; 247 } 248 249 int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev) 250 { 251 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 252 void *pp_handle = adev->powerplay.pp_handle; 253 int ret = 0; 254 255 if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features) 256 return -ENOENT; 257 258 mutex_lock(&adev->pm.mutex); 259 260 ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle); 261 262 mutex_unlock(&adev->pm.mutex); 263 264 return ret; 265 } 266 267 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev) 268 { 269 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 270 void *pp_handle = adev->powerplay.pp_handle; 271 int ret = 0; 272 273 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 274 return -ENOENT; 275 276 mutex_lock(&adev->pm.mutex); 277 278 /* enter BACO state */ 279 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 280 if (ret) 281 goto out; 282 283 /* exit BACO state */ 284 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 285 286 out: 287 mutex_unlock(&adev->pm.mutex); 288 return ret; 289 } 290 291 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev) 292 { 293 struct smu_context *smu = adev->powerplay.pp_handle; 294 bool support_mode1_reset = false; 295 296 if (is_support_sw_smu(adev)) { 297 mutex_lock(&adev->pm.mutex); 298 support_mode1_reset = smu_mode1_reset_is_support(smu); 299 mutex_unlock(&adev->pm.mutex); 300 } 301 302 return support_mode1_reset; 303 } 304 305 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev) 306 { 307 struct smu_context *smu = adev->powerplay.pp_handle; 308 int ret = -EOPNOTSUPP; 309 310 if (is_support_sw_smu(adev)) { 311 mutex_lock(&adev->pm.mutex); 312 ret = smu_mode1_reset(smu); 313 mutex_unlock(&adev->pm.mutex); 314 } 315 316 return ret; 317 } 318 319 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, 320 enum PP_SMC_POWER_PROFILE type, 321 bool en) 322 { 323 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 324 int ret = 0; 325 326 if (amdgpu_sriov_vf(adev)) 327 return 0; 328 329 if (pp_funcs && pp_funcs->switch_power_profile) { 330 mutex_lock(&adev->pm.mutex); 331 ret = pp_funcs->switch_power_profile( 332 adev->powerplay.pp_handle, type, en); 333 mutex_unlock(&adev->pm.mutex); 334 } 335 336 return ret; 337 } 338 339 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, 340 uint32_t pstate) 341 { 342 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 343 int ret = 0; 344 345 if (pp_funcs && pp_funcs->set_xgmi_pstate) { 346 mutex_lock(&adev->pm.mutex); 347 ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle, 348 pstate); 349 mutex_unlock(&adev->pm.mutex); 350 } 351 352 return ret; 353 } 354 355 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev, 356 uint32_t cstate) 357 { 358 int ret = 0; 359 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 360 void *pp_handle = adev->powerplay.pp_handle; 361 362 if (pp_funcs && pp_funcs->set_df_cstate) { 363 mutex_lock(&adev->pm.mutex); 364 ret = pp_funcs->set_df_cstate(pp_handle, cstate); 365 mutex_unlock(&adev->pm.mutex); 366 } 367 368 return ret; 369 } 370 371 int amdgpu_dpm_get_xgmi_plpd_mode(struct amdgpu_device *adev, char **mode_desc) 372 { 373 struct smu_context *smu = adev->powerplay.pp_handle; 374 int mode = XGMI_PLPD_NONE; 375 376 if (is_support_sw_smu(adev)) { 377 mode = smu->plpd_mode; 378 if (mode_desc == NULL) 379 return mode; 380 switch (smu->plpd_mode) { 381 case XGMI_PLPD_DISALLOW: 382 *mode_desc = "disallow"; 383 break; 384 case XGMI_PLPD_DEFAULT: 385 *mode_desc = "default"; 386 break; 387 case XGMI_PLPD_OPTIMIZED: 388 *mode_desc = "optimized"; 389 break; 390 case XGMI_PLPD_NONE: 391 default: 392 *mode_desc = "none"; 393 break; 394 } 395 } 396 397 return mode; 398 } 399 400 int amdgpu_dpm_set_xgmi_plpd_mode(struct amdgpu_device *adev, int mode) 401 { 402 struct smu_context *smu = adev->powerplay.pp_handle; 403 int ret = -EOPNOTSUPP; 404 405 if (is_support_sw_smu(adev)) { 406 mutex_lock(&adev->pm.mutex); 407 ret = smu_set_xgmi_plpd_mode(smu, mode); 408 mutex_unlock(&adev->pm.mutex); 409 } 410 411 return ret; 412 } 413 414 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev) 415 { 416 void *pp_handle = adev->powerplay.pp_handle; 417 const struct amd_pm_funcs *pp_funcs = 418 adev->powerplay.pp_funcs; 419 int ret = 0; 420 421 if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) { 422 mutex_lock(&adev->pm.mutex); 423 ret = pp_funcs->enable_mgpu_fan_boost(pp_handle); 424 mutex_unlock(&adev->pm.mutex); 425 } 426 427 return ret; 428 } 429 430 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev, 431 uint32_t msg_id) 432 { 433 void *pp_handle = adev->powerplay.pp_handle; 434 const struct amd_pm_funcs *pp_funcs = 435 adev->powerplay.pp_funcs; 436 int ret = 0; 437 438 if (pp_funcs && pp_funcs->set_clockgating_by_smu) { 439 mutex_lock(&adev->pm.mutex); 440 ret = pp_funcs->set_clockgating_by_smu(pp_handle, 441 msg_id); 442 mutex_unlock(&adev->pm.mutex); 443 } 444 445 return ret; 446 } 447 448 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev, 449 bool acquire) 450 { 451 void *pp_handle = adev->powerplay.pp_handle; 452 const struct amd_pm_funcs *pp_funcs = 453 adev->powerplay.pp_funcs; 454 int ret = -EOPNOTSUPP; 455 456 if (pp_funcs && pp_funcs->smu_i2c_bus_access) { 457 mutex_lock(&adev->pm.mutex); 458 ret = pp_funcs->smu_i2c_bus_access(pp_handle, 459 acquire); 460 mutex_unlock(&adev->pm.mutex); 461 } 462 463 return ret; 464 } 465 466 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 467 { 468 if (adev->pm.dpm_enabled) { 469 mutex_lock(&adev->pm.mutex); 470 if (power_supply_is_system_supplied() > 0) 471 adev->pm.ac_power = true; 472 else 473 adev->pm.ac_power = false; 474 475 if (adev->powerplay.pp_funcs && 476 adev->powerplay.pp_funcs->enable_bapm) 477 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); 478 479 if (is_support_sw_smu(adev)) 480 smu_set_ac_dc(adev->powerplay.pp_handle); 481 482 mutex_unlock(&adev->pm.mutex); 483 } 484 } 485 486 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, 487 void *data, uint32_t *size) 488 { 489 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 490 int ret = -EINVAL; 491 492 if (!data || !size) 493 return -EINVAL; 494 495 if (pp_funcs && pp_funcs->read_sensor) { 496 mutex_lock(&adev->pm.mutex); 497 ret = pp_funcs->read_sensor(adev->powerplay.pp_handle, 498 sensor, 499 data, 500 size); 501 mutex_unlock(&adev->pm.mutex); 502 } 503 504 return ret; 505 } 506 507 int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit) 508 { 509 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 510 int ret = -EOPNOTSUPP; 511 512 if (pp_funcs && pp_funcs->get_apu_thermal_limit) { 513 mutex_lock(&adev->pm.mutex); 514 ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit); 515 mutex_unlock(&adev->pm.mutex); 516 } 517 518 return ret; 519 } 520 521 int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit) 522 { 523 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 524 int ret = -EOPNOTSUPP; 525 526 if (pp_funcs && pp_funcs->set_apu_thermal_limit) { 527 mutex_lock(&adev->pm.mutex); 528 ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit); 529 mutex_unlock(&adev->pm.mutex); 530 } 531 532 return ret; 533 } 534 535 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev) 536 { 537 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 538 int i; 539 540 if (!adev->pm.dpm_enabled) 541 return; 542 543 if (!pp_funcs->pm_compute_clocks) 544 return; 545 546 if (adev->mode_info.num_crtc) 547 amdgpu_display_bandwidth_update(adev); 548 549 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 550 struct amdgpu_ring *ring = adev->rings[i]; 551 if (ring && ring->sched.ready) 552 amdgpu_fence_wait_empty(ring); 553 } 554 555 mutex_lock(&adev->pm.mutex); 556 pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle); 557 mutex_unlock(&adev->pm.mutex); 558 } 559 560 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 561 { 562 int ret = 0; 563 564 if (adev->family == AMDGPU_FAMILY_SI) { 565 mutex_lock(&adev->pm.mutex); 566 if (enable) { 567 adev->pm.dpm.uvd_active = true; 568 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; 569 } else { 570 adev->pm.dpm.uvd_active = false; 571 } 572 mutex_unlock(&adev->pm.mutex); 573 574 amdgpu_dpm_compute_clocks(adev); 575 return; 576 } 577 578 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); 579 if (ret) 580 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 581 enable ? "enable" : "disable", ret); 582 } 583 584 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 585 { 586 int ret = 0; 587 588 if (adev->family == AMDGPU_FAMILY_SI) { 589 mutex_lock(&adev->pm.mutex); 590 if (enable) { 591 adev->pm.dpm.vce_active = true; 592 /* XXX select vce level based on ring/task */ 593 adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; 594 } else { 595 adev->pm.dpm.vce_active = false; 596 } 597 mutex_unlock(&adev->pm.mutex); 598 599 amdgpu_dpm_compute_clocks(adev); 600 return; 601 } 602 603 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); 604 if (ret) 605 DRM_ERROR("Dpm %s vce failed, ret = %d. \n", 606 enable ? "enable" : "disable", ret); 607 } 608 609 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) 610 { 611 int ret = 0; 612 613 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable); 614 if (ret) 615 DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", 616 enable ? "enable" : "disable", ret); 617 } 618 619 void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable) 620 { 621 int ret = 0; 622 623 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable); 624 if (ret) 625 DRM_ERROR("Dpm %s vpe failed, ret = %d.\n", 626 enable ? "enable" : "disable", ret); 627 } 628 629 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) 630 { 631 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 632 int r = 0; 633 634 if (!pp_funcs || !pp_funcs->load_firmware) 635 return 0; 636 637 mutex_lock(&adev->pm.mutex); 638 r = pp_funcs->load_firmware(adev->powerplay.pp_handle); 639 if (r) { 640 pr_err("smu firmware loading failed\n"); 641 goto out; 642 } 643 644 if (smu_version) 645 *smu_version = adev->pm.fw_version; 646 647 out: 648 mutex_unlock(&adev->pm.mutex); 649 return r; 650 } 651 652 int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable) 653 { 654 int ret = 0; 655 656 if (is_support_sw_smu(adev)) { 657 mutex_lock(&adev->pm.mutex); 658 ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle, 659 enable); 660 mutex_unlock(&adev->pm.mutex); 661 } 662 663 return ret; 664 } 665 666 int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size) 667 { 668 struct smu_context *smu = adev->powerplay.pp_handle; 669 int ret = 0; 670 671 if (!is_support_sw_smu(adev)) 672 return -EOPNOTSUPP; 673 674 mutex_lock(&adev->pm.mutex); 675 ret = smu_send_hbm_bad_pages_num(smu, size); 676 mutex_unlock(&adev->pm.mutex); 677 678 return ret; 679 } 680 681 int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size) 682 { 683 struct smu_context *smu = adev->powerplay.pp_handle; 684 int ret = 0; 685 686 if (!is_support_sw_smu(adev)) 687 return -EOPNOTSUPP; 688 689 mutex_lock(&adev->pm.mutex); 690 ret = smu_send_hbm_bad_channel_flag(smu, size); 691 mutex_unlock(&adev->pm.mutex); 692 693 return ret; 694 } 695 696 int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, 697 enum pp_clock_type type, 698 uint32_t *min, 699 uint32_t *max) 700 { 701 int ret = 0; 702 703 if (type != PP_SCLK) 704 return -EINVAL; 705 706 if (!is_support_sw_smu(adev)) 707 return -EOPNOTSUPP; 708 709 mutex_lock(&adev->pm.mutex); 710 ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle, 711 SMU_SCLK, 712 min, 713 max); 714 mutex_unlock(&adev->pm.mutex); 715 716 return ret; 717 } 718 719 int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, 720 enum pp_clock_type type, 721 uint32_t min, 722 uint32_t max) 723 { 724 struct smu_context *smu = adev->powerplay.pp_handle; 725 int ret = 0; 726 727 if (type != PP_SCLK) 728 return -EINVAL; 729 730 if (!is_support_sw_smu(adev)) 731 return -EOPNOTSUPP; 732 733 mutex_lock(&adev->pm.mutex); 734 ret = smu_set_soft_freq_range(smu, 735 SMU_SCLK, 736 min, 737 max); 738 mutex_unlock(&adev->pm.mutex); 739 740 return ret; 741 } 742 743 int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev) 744 { 745 struct smu_context *smu = adev->powerplay.pp_handle; 746 int ret = 0; 747 748 if (!is_support_sw_smu(adev)) 749 return 0; 750 751 mutex_lock(&adev->pm.mutex); 752 ret = smu_write_watermarks_table(smu); 753 mutex_unlock(&adev->pm.mutex); 754 755 return ret; 756 } 757 758 int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, 759 enum smu_event_type event, 760 uint64_t event_arg) 761 { 762 struct smu_context *smu = adev->powerplay.pp_handle; 763 int ret = 0; 764 765 if (!is_support_sw_smu(adev)) 766 return -EOPNOTSUPP; 767 768 mutex_lock(&adev->pm.mutex); 769 ret = smu_wait_for_event(smu, event, event_arg); 770 mutex_unlock(&adev->pm.mutex); 771 772 return ret; 773 } 774 775 int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value) 776 { 777 struct smu_context *smu = adev->powerplay.pp_handle; 778 int ret = 0; 779 780 if (!is_support_sw_smu(adev)) 781 return -EOPNOTSUPP; 782 783 mutex_lock(&adev->pm.mutex); 784 ret = smu_set_residency_gfxoff(smu, value); 785 mutex_unlock(&adev->pm.mutex); 786 787 return ret; 788 } 789 790 int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value) 791 { 792 struct smu_context *smu = adev->powerplay.pp_handle; 793 int ret = 0; 794 795 if (!is_support_sw_smu(adev)) 796 return -EOPNOTSUPP; 797 798 mutex_lock(&adev->pm.mutex); 799 ret = smu_get_residency_gfxoff(smu, value); 800 mutex_unlock(&adev->pm.mutex); 801 802 return ret; 803 } 804 805 int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value) 806 { 807 struct smu_context *smu = adev->powerplay.pp_handle; 808 int ret = 0; 809 810 if (!is_support_sw_smu(adev)) 811 return -EOPNOTSUPP; 812 813 mutex_lock(&adev->pm.mutex); 814 ret = smu_get_entrycount_gfxoff(smu, value); 815 mutex_unlock(&adev->pm.mutex); 816 817 return ret; 818 } 819 820 int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value) 821 { 822 struct smu_context *smu = adev->powerplay.pp_handle; 823 int ret = 0; 824 825 if (!is_support_sw_smu(adev)) 826 return -EOPNOTSUPP; 827 828 mutex_lock(&adev->pm.mutex); 829 ret = smu_get_status_gfxoff(smu, value); 830 mutex_unlock(&adev->pm.mutex); 831 832 return ret; 833 } 834 835 uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev) 836 { 837 struct smu_context *smu = adev->powerplay.pp_handle; 838 839 if (!is_support_sw_smu(adev)) 840 return 0; 841 842 return atomic64_read(&smu->throttle_int_counter); 843 } 844 845 /* amdgpu_dpm_gfx_state_change - Handle gfx power state change set 846 * @adev: amdgpu_device pointer 847 * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry) 848 * 849 */ 850 void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev, 851 enum gfx_change_state state) 852 { 853 mutex_lock(&adev->pm.mutex); 854 if (adev->powerplay.pp_funcs && 855 adev->powerplay.pp_funcs->gfx_state_change_set) 856 ((adev)->powerplay.pp_funcs->gfx_state_change_set( 857 (adev)->powerplay.pp_handle, state)); 858 mutex_unlock(&adev->pm.mutex); 859 } 860 861 int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, 862 void *umc_ecc) 863 { 864 struct smu_context *smu = adev->powerplay.pp_handle; 865 int ret = 0; 866 867 if (!is_support_sw_smu(adev)) 868 return -EOPNOTSUPP; 869 870 mutex_lock(&adev->pm.mutex); 871 ret = smu_get_ecc_info(smu, umc_ecc); 872 mutex_unlock(&adev->pm.mutex); 873 874 return ret; 875 } 876 877 struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev, 878 uint32_t idx) 879 { 880 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 881 struct amd_vce_state *vstate = NULL; 882 883 if (!pp_funcs->get_vce_clock_state) 884 return NULL; 885 886 mutex_lock(&adev->pm.mutex); 887 vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle, 888 idx); 889 mutex_unlock(&adev->pm.mutex); 890 891 return vstate; 892 } 893 894 void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, 895 enum amd_pm_state_type *state) 896 { 897 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 898 899 mutex_lock(&adev->pm.mutex); 900 901 if (!pp_funcs->get_current_power_state) { 902 *state = adev->pm.dpm.user_state; 903 goto out; 904 } 905 906 *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle); 907 if (*state < POWER_STATE_TYPE_DEFAULT || 908 *state > POWER_STATE_TYPE_INTERNAL_3DPERF) 909 *state = adev->pm.dpm.user_state; 910 911 out: 912 mutex_unlock(&adev->pm.mutex); 913 } 914 915 void amdgpu_dpm_set_power_state(struct amdgpu_device *adev, 916 enum amd_pm_state_type state) 917 { 918 mutex_lock(&adev->pm.mutex); 919 adev->pm.dpm.user_state = state; 920 mutex_unlock(&adev->pm.mutex); 921 922 if (is_support_sw_smu(adev)) 923 return; 924 925 if (amdgpu_dpm_dispatch_task(adev, 926 AMD_PP_TASK_ENABLE_USER_STATE, 927 &state) == -EOPNOTSUPP) 928 amdgpu_dpm_compute_clocks(adev); 929 } 930 931 enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev) 932 { 933 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 934 enum amd_dpm_forced_level level; 935 936 if (!pp_funcs) 937 return AMD_DPM_FORCED_LEVEL_AUTO; 938 939 mutex_lock(&adev->pm.mutex); 940 if (pp_funcs->get_performance_level) 941 level = pp_funcs->get_performance_level(adev->powerplay.pp_handle); 942 else 943 level = adev->pm.dpm.forced_level; 944 mutex_unlock(&adev->pm.mutex); 945 946 return level; 947 } 948 949 int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev, 950 enum amd_dpm_forced_level level) 951 { 952 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 953 enum amd_dpm_forced_level current_level; 954 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 955 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 956 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 957 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 958 959 if (!pp_funcs || !pp_funcs->force_performance_level) 960 return 0; 961 962 if (adev->pm.dpm.thermal_active) 963 return -EINVAL; 964 965 current_level = amdgpu_dpm_get_performance_level(adev); 966 if (current_level == level) 967 return 0; 968 969 if (adev->asic_type == CHIP_RAVEN) { 970 if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) { 971 if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && 972 level == AMD_DPM_FORCED_LEVEL_MANUAL) 973 amdgpu_gfx_off_ctrl(adev, false); 974 else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && 975 level != AMD_DPM_FORCED_LEVEL_MANUAL) 976 amdgpu_gfx_off_ctrl(adev, true); 977 } 978 } 979 980 if (!(current_level & profile_mode_mask) && 981 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) 982 return -EINVAL; 983 984 if (!(current_level & profile_mode_mask) && 985 (level & profile_mode_mask)) { 986 /* enter UMD Pstate */ 987 amdgpu_device_ip_set_powergating_state(adev, 988 AMD_IP_BLOCK_TYPE_GFX, 989 AMD_PG_STATE_UNGATE); 990 amdgpu_device_ip_set_clockgating_state(adev, 991 AMD_IP_BLOCK_TYPE_GFX, 992 AMD_CG_STATE_UNGATE); 993 } else if ((current_level & profile_mode_mask) && 994 !(level & profile_mode_mask)) { 995 /* exit UMD Pstate */ 996 amdgpu_device_ip_set_clockgating_state(adev, 997 AMD_IP_BLOCK_TYPE_GFX, 998 AMD_CG_STATE_GATE); 999 amdgpu_device_ip_set_powergating_state(adev, 1000 AMD_IP_BLOCK_TYPE_GFX, 1001 AMD_PG_STATE_GATE); 1002 } 1003 1004 mutex_lock(&adev->pm.mutex); 1005 1006 if (pp_funcs->force_performance_level(adev->powerplay.pp_handle, 1007 level)) { 1008 mutex_unlock(&adev->pm.mutex); 1009 return -EINVAL; 1010 } 1011 1012 adev->pm.dpm.forced_level = level; 1013 1014 mutex_unlock(&adev->pm.mutex); 1015 1016 return 0; 1017 } 1018 1019 int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev, 1020 struct pp_states_info *states) 1021 { 1022 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1023 int ret = 0; 1024 1025 if (!pp_funcs->get_pp_num_states) 1026 return -EOPNOTSUPP; 1027 1028 mutex_lock(&adev->pm.mutex); 1029 ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle, 1030 states); 1031 mutex_unlock(&adev->pm.mutex); 1032 1033 return ret; 1034 } 1035 1036 int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev, 1037 enum amd_pp_task task_id, 1038 enum amd_pm_state_type *user_state) 1039 { 1040 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1041 int ret = 0; 1042 1043 if (!pp_funcs->dispatch_tasks) 1044 return -EOPNOTSUPP; 1045 1046 mutex_lock(&adev->pm.mutex); 1047 ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle, 1048 task_id, 1049 user_state); 1050 mutex_unlock(&adev->pm.mutex); 1051 1052 return ret; 1053 } 1054 1055 int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table) 1056 { 1057 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1058 int ret = 0; 1059 1060 if (!pp_funcs->get_pp_table) 1061 return 0; 1062 1063 mutex_lock(&adev->pm.mutex); 1064 ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle, 1065 table); 1066 mutex_unlock(&adev->pm.mutex); 1067 1068 return ret; 1069 } 1070 1071 int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev, 1072 uint32_t type, 1073 long *input, 1074 uint32_t size) 1075 { 1076 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1077 int ret = 0; 1078 1079 if (!pp_funcs->set_fine_grain_clk_vol) 1080 return 0; 1081 1082 mutex_lock(&adev->pm.mutex); 1083 ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle, 1084 type, 1085 input, 1086 size); 1087 mutex_unlock(&adev->pm.mutex); 1088 1089 return ret; 1090 } 1091 1092 int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev, 1093 uint32_t type, 1094 long *input, 1095 uint32_t size) 1096 { 1097 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1098 int ret = 0; 1099 1100 if (!pp_funcs->odn_edit_dpm_table) 1101 return 0; 1102 1103 mutex_lock(&adev->pm.mutex); 1104 ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle, 1105 type, 1106 input, 1107 size); 1108 mutex_unlock(&adev->pm.mutex); 1109 1110 return ret; 1111 } 1112 1113 int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev, 1114 enum pp_clock_type type, 1115 char *buf) 1116 { 1117 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1118 int ret = 0; 1119 1120 if (!pp_funcs->print_clock_levels) 1121 return 0; 1122 1123 mutex_lock(&adev->pm.mutex); 1124 ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle, 1125 type, 1126 buf); 1127 mutex_unlock(&adev->pm.mutex); 1128 1129 return ret; 1130 } 1131 1132 int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev, 1133 enum pp_clock_type type, 1134 char *buf, 1135 int *offset) 1136 { 1137 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1138 int ret = 0; 1139 1140 if (!pp_funcs->emit_clock_levels) 1141 return -ENOENT; 1142 1143 mutex_lock(&adev->pm.mutex); 1144 ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle, 1145 type, 1146 buf, 1147 offset); 1148 mutex_unlock(&adev->pm.mutex); 1149 1150 return ret; 1151 } 1152 1153 int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev, 1154 uint64_t ppfeature_masks) 1155 { 1156 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1157 int ret = 0; 1158 1159 if (!pp_funcs->set_ppfeature_status) 1160 return 0; 1161 1162 mutex_lock(&adev->pm.mutex); 1163 ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle, 1164 ppfeature_masks); 1165 mutex_unlock(&adev->pm.mutex); 1166 1167 return ret; 1168 } 1169 1170 int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf) 1171 { 1172 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1173 int ret = 0; 1174 1175 if (!pp_funcs->get_ppfeature_status) 1176 return 0; 1177 1178 mutex_lock(&adev->pm.mutex); 1179 ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle, 1180 buf); 1181 mutex_unlock(&adev->pm.mutex); 1182 1183 return ret; 1184 } 1185 1186 int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev, 1187 enum pp_clock_type type, 1188 uint32_t mask) 1189 { 1190 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1191 int ret = 0; 1192 1193 if (!pp_funcs->force_clock_level) 1194 return 0; 1195 1196 mutex_lock(&adev->pm.mutex); 1197 ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle, 1198 type, 1199 mask); 1200 mutex_unlock(&adev->pm.mutex); 1201 1202 return ret; 1203 } 1204 1205 int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev) 1206 { 1207 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1208 int ret = 0; 1209 1210 if (!pp_funcs->get_sclk_od) 1211 return -EOPNOTSUPP; 1212 1213 mutex_lock(&adev->pm.mutex); 1214 ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle); 1215 mutex_unlock(&adev->pm.mutex); 1216 1217 return ret; 1218 } 1219 1220 int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) 1221 { 1222 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1223 1224 if (is_support_sw_smu(adev)) 1225 return -EOPNOTSUPP; 1226 1227 mutex_lock(&adev->pm.mutex); 1228 if (pp_funcs->set_sclk_od) 1229 pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value); 1230 mutex_unlock(&adev->pm.mutex); 1231 1232 if (amdgpu_dpm_dispatch_task(adev, 1233 AMD_PP_TASK_READJUST_POWER_STATE, 1234 NULL) == -EOPNOTSUPP) { 1235 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1236 amdgpu_dpm_compute_clocks(adev); 1237 } 1238 1239 return 0; 1240 } 1241 1242 int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev) 1243 { 1244 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1245 int ret = 0; 1246 1247 if (!pp_funcs->get_mclk_od) 1248 return -EOPNOTSUPP; 1249 1250 mutex_lock(&adev->pm.mutex); 1251 ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle); 1252 mutex_unlock(&adev->pm.mutex); 1253 1254 return ret; 1255 } 1256 1257 int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) 1258 { 1259 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1260 1261 if (is_support_sw_smu(adev)) 1262 return -EOPNOTSUPP; 1263 1264 mutex_lock(&adev->pm.mutex); 1265 if (pp_funcs->set_mclk_od) 1266 pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value); 1267 mutex_unlock(&adev->pm.mutex); 1268 1269 if (amdgpu_dpm_dispatch_task(adev, 1270 AMD_PP_TASK_READJUST_POWER_STATE, 1271 NULL) == -EOPNOTSUPP) { 1272 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1273 amdgpu_dpm_compute_clocks(adev); 1274 } 1275 1276 return 0; 1277 } 1278 1279 int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev, 1280 char *buf) 1281 { 1282 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1283 int ret = 0; 1284 1285 if (!pp_funcs->get_power_profile_mode) 1286 return -EOPNOTSUPP; 1287 1288 mutex_lock(&adev->pm.mutex); 1289 ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle, 1290 buf); 1291 mutex_unlock(&adev->pm.mutex); 1292 1293 return ret; 1294 } 1295 1296 int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev, 1297 long *input, uint32_t size) 1298 { 1299 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1300 int ret = 0; 1301 1302 if (!pp_funcs->set_power_profile_mode) 1303 return 0; 1304 1305 mutex_lock(&adev->pm.mutex); 1306 ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle, 1307 input, 1308 size); 1309 mutex_unlock(&adev->pm.mutex); 1310 1311 return ret; 1312 } 1313 1314 int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table) 1315 { 1316 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1317 int ret = 0; 1318 1319 if (!pp_funcs->get_gpu_metrics) 1320 return 0; 1321 1322 mutex_lock(&adev->pm.mutex); 1323 ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle, 1324 table); 1325 mutex_unlock(&adev->pm.mutex); 1326 1327 return ret; 1328 } 1329 1330 ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics, 1331 size_t size) 1332 { 1333 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1334 int ret = 0; 1335 1336 if (!pp_funcs->get_pm_metrics) 1337 return -EOPNOTSUPP; 1338 1339 mutex_lock(&adev->pm.mutex); 1340 ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics, 1341 size); 1342 mutex_unlock(&adev->pm.mutex); 1343 1344 return ret; 1345 } 1346 1347 int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev, 1348 uint32_t *fan_mode) 1349 { 1350 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1351 int ret = 0; 1352 1353 if (!pp_funcs->get_fan_control_mode) 1354 return -EOPNOTSUPP; 1355 1356 mutex_lock(&adev->pm.mutex); 1357 ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle, 1358 fan_mode); 1359 mutex_unlock(&adev->pm.mutex); 1360 1361 return ret; 1362 } 1363 1364 int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev, 1365 uint32_t speed) 1366 { 1367 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1368 int ret = 0; 1369 1370 if (!pp_funcs->set_fan_speed_pwm) 1371 return -EOPNOTSUPP; 1372 1373 mutex_lock(&adev->pm.mutex); 1374 ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle, 1375 speed); 1376 mutex_unlock(&adev->pm.mutex); 1377 1378 return ret; 1379 } 1380 1381 int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev, 1382 uint32_t *speed) 1383 { 1384 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1385 int ret = 0; 1386 1387 if (!pp_funcs->get_fan_speed_pwm) 1388 return -EOPNOTSUPP; 1389 1390 mutex_lock(&adev->pm.mutex); 1391 ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle, 1392 speed); 1393 mutex_unlock(&adev->pm.mutex); 1394 1395 return ret; 1396 } 1397 1398 int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev, 1399 uint32_t *speed) 1400 { 1401 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1402 int ret = 0; 1403 1404 if (!pp_funcs->get_fan_speed_rpm) 1405 return -EOPNOTSUPP; 1406 1407 mutex_lock(&adev->pm.mutex); 1408 ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle, 1409 speed); 1410 mutex_unlock(&adev->pm.mutex); 1411 1412 return ret; 1413 } 1414 1415 int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev, 1416 uint32_t speed) 1417 { 1418 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1419 int ret = 0; 1420 1421 if (!pp_funcs->set_fan_speed_rpm) 1422 return -EOPNOTSUPP; 1423 1424 mutex_lock(&adev->pm.mutex); 1425 ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle, 1426 speed); 1427 mutex_unlock(&adev->pm.mutex); 1428 1429 return ret; 1430 } 1431 1432 int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev, 1433 uint32_t mode) 1434 { 1435 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1436 int ret = 0; 1437 1438 if (!pp_funcs->set_fan_control_mode) 1439 return -EOPNOTSUPP; 1440 1441 mutex_lock(&adev->pm.mutex); 1442 ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle, 1443 mode); 1444 mutex_unlock(&adev->pm.mutex); 1445 1446 return ret; 1447 } 1448 1449 int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, 1450 uint32_t *limit, 1451 enum pp_power_limit_level pp_limit_level, 1452 enum pp_power_type power_type) 1453 { 1454 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1455 int ret = 0; 1456 1457 if (!pp_funcs->get_power_limit) 1458 return -ENODATA; 1459 1460 mutex_lock(&adev->pm.mutex); 1461 ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle, 1462 limit, 1463 pp_limit_level, 1464 power_type); 1465 mutex_unlock(&adev->pm.mutex); 1466 1467 return ret; 1468 } 1469 1470 int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, 1471 uint32_t limit) 1472 { 1473 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1474 int ret = 0; 1475 1476 if (!pp_funcs->set_power_limit) 1477 return -EINVAL; 1478 1479 mutex_lock(&adev->pm.mutex); 1480 ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle, 1481 limit); 1482 mutex_unlock(&adev->pm.mutex); 1483 1484 return ret; 1485 } 1486 1487 int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev) 1488 { 1489 bool cclk_dpm_supported = false; 1490 1491 if (!is_support_sw_smu(adev)) 1492 return false; 1493 1494 mutex_lock(&adev->pm.mutex); 1495 cclk_dpm_supported = is_support_cclk_dpm(adev); 1496 mutex_unlock(&adev->pm.mutex); 1497 1498 return (int)cclk_dpm_supported; 1499 } 1500 1501 int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 1502 struct seq_file *m) 1503 { 1504 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1505 1506 if (!pp_funcs->debugfs_print_current_performance_level) 1507 return -EOPNOTSUPP; 1508 1509 mutex_lock(&adev->pm.mutex); 1510 pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle, 1511 m); 1512 mutex_unlock(&adev->pm.mutex); 1513 1514 return 0; 1515 } 1516 1517 int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, 1518 void **addr, 1519 size_t *size) 1520 { 1521 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1522 int ret = 0; 1523 1524 if (!pp_funcs->get_smu_prv_buf_details) 1525 return -ENOSYS; 1526 1527 mutex_lock(&adev->pm.mutex); 1528 ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle, 1529 addr, 1530 size); 1531 mutex_unlock(&adev->pm.mutex); 1532 1533 return ret; 1534 } 1535 1536 int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) 1537 { 1538 if (is_support_sw_smu(adev)) { 1539 struct smu_context *smu = adev->powerplay.pp_handle; 1540 1541 return (smu->od_enabled || smu->is_apu); 1542 } else { 1543 struct pp_hwmgr *hwmgr; 1544 1545 /* 1546 * dpm on some legacy asics don't carry od_enabled member 1547 * as its pp_handle is casted directly from adev. 1548 */ 1549 if (amdgpu_dpm_is_legacy_dpm(adev)) 1550 return false; 1551 1552 hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle; 1553 1554 return hwmgr->od_enabled; 1555 } 1556 } 1557 1558 int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, 1559 const char *buf, 1560 size_t size) 1561 { 1562 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1563 int ret = 0; 1564 1565 if (!pp_funcs->set_pp_table) 1566 return -EOPNOTSUPP; 1567 1568 mutex_lock(&adev->pm.mutex); 1569 ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle, 1570 buf, 1571 size); 1572 mutex_unlock(&adev->pm.mutex); 1573 1574 return ret; 1575 } 1576 1577 int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev) 1578 { 1579 struct smu_context *smu = adev->powerplay.pp_handle; 1580 1581 if (!is_support_sw_smu(adev)) 1582 return INT_MAX; 1583 1584 return smu->cpu_core_num; 1585 } 1586 1587 void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev) 1588 { 1589 if (!is_support_sw_smu(adev)) 1590 return; 1591 1592 amdgpu_smu_stb_debug_fs_init(adev); 1593 } 1594 1595 int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev, 1596 const struct amd_pp_display_configuration *input) 1597 { 1598 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1599 int ret = 0; 1600 1601 if (!pp_funcs->display_configuration_change) 1602 return 0; 1603 1604 mutex_lock(&adev->pm.mutex); 1605 ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle, 1606 input); 1607 mutex_unlock(&adev->pm.mutex); 1608 1609 return ret; 1610 } 1611 1612 int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev, 1613 enum amd_pp_clock_type type, 1614 struct amd_pp_clocks *clocks) 1615 { 1616 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1617 int ret = 0; 1618 1619 if (!pp_funcs->get_clock_by_type) 1620 return 0; 1621 1622 mutex_lock(&adev->pm.mutex); 1623 ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle, 1624 type, 1625 clocks); 1626 mutex_unlock(&adev->pm.mutex); 1627 1628 return ret; 1629 } 1630 1631 int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev, 1632 struct amd_pp_simple_clock_info *clocks) 1633 { 1634 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1635 int ret = 0; 1636 1637 if (!pp_funcs->get_display_mode_validation_clocks) 1638 return 0; 1639 1640 mutex_lock(&adev->pm.mutex); 1641 ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle, 1642 clocks); 1643 mutex_unlock(&adev->pm.mutex); 1644 1645 return ret; 1646 } 1647 1648 int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev, 1649 enum amd_pp_clock_type type, 1650 struct pp_clock_levels_with_latency *clocks) 1651 { 1652 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1653 int ret = 0; 1654 1655 if (!pp_funcs->get_clock_by_type_with_latency) 1656 return 0; 1657 1658 mutex_lock(&adev->pm.mutex); 1659 ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle, 1660 type, 1661 clocks); 1662 mutex_unlock(&adev->pm.mutex); 1663 1664 return ret; 1665 } 1666 1667 int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev, 1668 enum amd_pp_clock_type type, 1669 struct pp_clock_levels_with_voltage *clocks) 1670 { 1671 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1672 int ret = 0; 1673 1674 if (!pp_funcs->get_clock_by_type_with_voltage) 1675 return 0; 1676 1677 mutex_lock(&adev->pm.mutex); 1678 ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle, 1679 type, 1680 clocks); 1681 mutex_unlock(&adev->pm.mutex); 1682 1683 return ret; 1684 } 1685 1686 int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev, 1687 void *clock_ranges) 1688 { 1689 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1690 int ret = 0; 1691 1692 if (!pp_funcs->set_watermarks_for_clocks_ranges) 1693 return -EOPNOTSUPP; 1694 1695 mutex_lock(&adev->pm.mutex); 1696 ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle, 1697 clock_ranges); 1698 mutex_unlock(&adev->pm.mutex); 1699 1700 return ret; 1701 } 1702 1703 int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev, 1704 struct pp_display_clock_request *clock) 1705 { 1706 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1707 int ret = 0; 1708 1709 if (!pp_funcs->display_clock_voltage_request) 1710 return -EOPNOTSUPP; 1711 1712 mutex_lock(&adev->pm.mutex); 1713 ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle, 1714 clock); 1715 mutex_unlock(&adev->pm.mutex); 1716 1717 return ret; 1718 } 1719 1720 int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev, 1721 struct amd_pp_clock_info *clocks) 1722 { 1723 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1724 int ret = 0; 1725 1726 if (!pp_funcs->get_current_clocks) 1727 return -EOPNOTSUPP; 1728 1729 mutex_lock(&adev->pm.mutex); 1730 ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle, 1731 clocks); 1732 mutex_unlock(&adev->pm.mutex); 1733 1734 return ret; 1735 } 1736 1737 void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev) 1738 { 1739 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1740 1741 if (!pp_funcs->notify_smu_enable_pwe) 1742 return; 1743 1744 mutex_lock(&adev->pm.mutex); 1745 pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle); 1746 mutex_unlock(&adev->pm.mutex); 1747 } 1748 1749 int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev, 1750 uint32_t count) 1751 { 1752 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1753 int ret = 0; 1754 1755 if (!pp_funcs->set_active_display_count) 1756 return -EOPNOTSUPP; 1757 1758 mutex_lock(&adev->pm.mutex); 1759 ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle, 1760 count); 1761 mutex_unlock(&adev->pm.mutex); 1762 1763 return ret; 1764 } 1765 1766 int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev, 1767 uint32_t clock) 1768 { 1769 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1770 int ret = 0; 1771 1772 if (!pp_funcs->set_min_deep_sleep_dcefclk) 1773 return -EOPNOTSUPP; 1774 1775 mutex_lock(&adev->pm.mutex); 1776 ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle, 1777 clock); 1778 mutex_unlock(&adev->pm.mutex); 1779 1780 return ret; 1781 } 1782 1783 void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev, 1784 uint32_t clock) 1785 { 1786 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1787 1788 if (!pp_funcs->set_hard_min_dcefclk_by_freq) 1789 return; 1790 1791 mutex_lock(&adev->pm.mutex); 1792 pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle, 1793 clock); 1794 mutex_unlock(&adev->pm.mutex); 1795 } 1796 1797 void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev, 1798 uint32_t clock) 1799 { 1800 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1801 1802 if (!pp_funcs->set_hard_min_fclk_by_freq) 1803 return; 1804 1805 mutex_lock(&adev->pm.mutex); 1806 pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle, 1807 clock); 1808 mutex_unlock(&adev->pm.mutex); 1809 } 1810 1811 int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev, 1812 bool disable_memory_clock_switch) 1813 { 1814 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1815 int ret = 0; 1816 1817 if (!pp_funcs->display_disable_memory_clock_switch) 1818 return 0; 1819 1820 mutex_lock(&adev->pm.mutex); 1821 ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle, 1822 disable_memory_clock_switch); 1823 mutex_unlock(&adev->pm.mutex); 1824 1825 return ret; 1826 } 1827 1828 int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev, 1829 struct pp_smu_nv_clock_table *max_clocks) 1830 { 1831 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1832 int ret = 0; 1833 1834 if (!pp_funcs->get_max_sustainable_clocks_by_dc) 1835 return -EOPNOTSUPP; 1836 1837 mutex_lock(&adev->pm.mutex); 1838 ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle, 1839 max_clocks); 1840 mutex_unlock(&adev->pm.mutex); 1841 1842 return ret; 1843 } 1844 1845 enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev, 1846 unsigned int *clock_values_in_khz, 1847 unsigned int *num_states) 1848 { 1849 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1850 int ret = 0; 1851 1852 if (!pp_funcs->get_uclk_dpm_states) 1853 return -EOPNOTSUPP; 1854 1855 mutex_lock(&adev->pm.mutex); 1856 ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle, 1857 clock_values_in_khz, 1858 num_states); 1859 mutex_unlock(&adev->pm.mutex); 1860 1861 return ret; 1862 } 1863 1864 int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev, 1865 struct dpm_clocks *clock_table) 1866 { 1867 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1868 int ret = 0; 1869 1870 if (!pp_funcs->get_dpm_clock_table) 1871 return -EOPNOTSUPP; 1872 1873 mutex_lock(&adev->pm.mutex); 1874 ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle, 1875 clock_table); 1876 mutex_unlock(&adev->pm.mutex); 1877 1878 return ret; 1879 } 1880