/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/reboot.h>
#include "amd_shared.h"
#include "amd_powerplay.h"
#include "power_state.h"
#include "amdgpu.h"
#include "hwmgr.h"
#include "amdgpu_dpm_internal.h"
#include "amdgpu_display.h"

static const struct amd_pm_funcs pp_dpm_funcs;

static int amd_powerplay_create(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr;

	if (adev == NULL)
		return -EINVAL;

	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
	if (hwmgr == NULL)
		return -ENOMEM;

	hwmgr->adev = adev;
	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
	hwmgr->device = amdgpu_cgs_create_device(adev);
	mutex_init(&hwmgr->msg_lock);
	hwmgr->chip_family = adev->family;
	hwmgr->chip_id = adev->asic_type;
	hwmgr->feature_mask = adev->pm.pp_feature;
	hwmgr->display_config = &adev->pm.pm_display_cfg;
	adev->powerplay.pp_handle = hwmgr;
	adev->powerplay.pp_funcs = &pp_dpm_funcs;
	return 0;
}

static void amd_powerplay_destroy(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	mutex_destroy(&hwmgr->msg_lock);

	kfree(hwmgr->hardcode_pp_table);
	hwmgr->hardcode_pp_table = NULL;

	kfree(hwmgr);
}
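
/*
 * amdgpu IP block callbacks: powerplay registers as the SMC IP block
 * (pp_smu_ip_block below) and is brought up and torn down through the
 * hooks that follow.
 */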
static int pp_early_init(struct amdgpu_ip_block *ip_block)
{
	int ret;
	struct amdgpu_device *adev = ip_block->adev;

	ret = amd_powerplay_create(adev);
	if (ret != 0)
		return ret;

	ret = hwmgr_early_init(adev->powerplay.pp_handle);
	if (ret)
		return -EINVAL;

	return 0;
}

static void pp_swctf_delayed_work_handler(struct work_struct *work)
{
	struct pp_hwmgr *hwmgr =
		container_of(work, struct pp_hwmgr, swctf_delayed_work.work);
	struct amdgpu_device *adev = hwmgr->adev;
	struct amdgpu_dpm_thermal *range =
			&adev->pm.dpm.thermal;
	uint32_t gpu_temperature, size = sizeof(gpu_temperature);
	int ret;

	/*
	 * If, after the enforced delay, the hotspot/edge temperature is
	 * confirmed to be below the SW CTF threshold, nothing needs to be
	 * done. Otherwise, a graceful shutdown is performed to prevent
	 * further damage.
	 */
	if (range->sw_ctf_threshold &&
	    hwmgr->hwmgr_func->read_sensor) {
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
						     AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
						     &gpu_temperature,
						     &size);
		/*
		 * Retrieving the hotspot temperature may not be supported on
		 * some legacy ASICs. Fall back to the edge temperature then.
		 */
		if (ret == -EOPNOTSUPP)
			ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
							     AMDGPU_PP_SENSOR_EDGE_TEMP,
							     &gpu_temperature,
							     &size);
		if (!ret && gpu_temperature / 1000 < range->sw_ctf_threshold)
			return;
	}

	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
	orderly_poweroff(true);
}

static int pp_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = hwmgr_sw_init(hwmgr);

	pr_debug("powerplay sw init %s\n", ret ? "failed" : "succeeded");

	if (!ret)
		INIT_DELAYED_WORK(&hwmgr->swctf_delayed_work,
				  pp_swctf_delayed_work_handler);

	return ret;
}

static int pp_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	hwmgr_sw_fini(hwmgr);

	amdgpu_ucode_release(&adev->pm.fw);

	return 0;
}

static int pp_hw_init(struct amdgpu_ip_block *ip_block)
{
	int ret = 0;
	struct amdgpu_device *adev = ip_block->adev;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	ret = hwmgr_hw_init(hwmgr);
	if (ret)
		pr_err("powerplay hw init failed\n");

	return ret;
}

static int pp_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle;

	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);

	hwmgr_hw_fini(hwmgr);

	return 0;
}
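
/*
 * Reserve the SMU private buffer in GTT and hand its CPU and GPU
 * addresses to the SMU. If the SMU cannot be notified, the buffer is
 * freed again and the feature is simply left disabled.
 */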
static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
	int r = -EINVAL;
	void *cpu_ptr = NULL;
	uint64_t gpu_addr;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &adev->pm.smu_prv_buffer,
				    &gpu_addr,
				    &cpu_ptr)) {
		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
		return;
	}

	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
					lower_32_bits((unsigned long)cpu_ptr),
					upper_32_bits((unsigned long)cpu_ptr),
					lower_32_bits(gpu_addr),
					upper_32_bits(gpu_addr),
					adev->pm.smu_prv_buffer_size);

	if (r) {
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
		adev->pm.smu_prv_buffer = NULL;
		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
	}
}

static int pp_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (hwmgr && hwmgr->pm_en)
		hwmgr_handle_task(hwmgr,
				  AMD_PP_TASK_COMPLETE_INIT, NULL);
	if (adev->pm.smu_prv_buffer_size != 0)
		pp_reserve_vram_for_smu(adev);

	return 0;
}

static void pp_late_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (adev->pm.smu_prv_buffer)
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
	amd_powerplay_destroy(adev);
}

static bool pp_is_idle(void *handle)
{
	return false;
}

static int pp_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

static int pp_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);

	return hwmgr_suspend(hwmgr);
}

static int pp_resume(struct amdgpu_ip_block *ip_block)
{
	struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle;

	return hwmgr_resume(hwmgr);
}

static int pp_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};

const struct amdgpu_ip_block_version pp_smu_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};

/*
 * This interface is only supported on VI ASICs, because only smu7/8 can
 * help load the gfx/sdma firmware. The SMU has to be running before any
 * other IP's firmware is loaded, so start_smu is called here to load the
 * smu7 firmware first and then the other IPs' firmware.
 */
static int pp_dpm_load_fw(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
		return -EINVAL;

	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
		pr_err("fw load failed\n");
		return -EINVAL;
	}

	return 0;
}

static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}

static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}
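
/*
 * UMD pstate bookkeeping: entering any of the profiling levels saves the
 * current DPM level so that AMD_DPM_FORCED_LEVEL_PROFILE_EXIT can
 * restore it later.
 */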
static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
		}
	}
}

static int pp_dpm_force_performance_level(void *handle,
					  enum amd_dpm_forced_level level)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (level == hwmgr->dpm_level)
		return 0;

	pp_dpm_en_umd_pstate(hwmgr, &level);
	hwmgr->request_dpm_level = level;
	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);

	return 0;
}

static enum amd_dpm_forced_level pp_dpm_get_performance_level(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	return hwmgr->dpm_level;
}

static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->get_sclk == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
}

static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->get_mclk == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
}

static void pp_dpm_powergate_vce(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}
	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
}

static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}
	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
}

static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
				 enum amd_pm_state_type *user_state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	return hwmgr_handle_task(hwmgr, task_id, user_state);
}
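
/*
 * Map the internal PP_StateUILabel classification of the current power
 * state onto the generic amd_pm_state_type values understood by the
 * rest of the driver.
 */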
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct pp_power_state *state;
	enum amd_pm_state_type pm_type;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
		return -EINVAL;

	state = hwmgr->current_ps;

	switch (state->classification.ui_label) {
	case PP_StateUILabel_Battery:
		pm_type = POWER_STATE_TYPE_BATTERY;
		break;
	case PP_StateUILabel_Balanced:
		pm_type = POWER_STATE_TYPE_BALANCED;
		break;
	case PP_StateUILabel_Performance:
		pm_type = POWER_STATE_TYPE_PERFORMANCE;
		break;
	default:
		if (state->classification.flags & PP_StateClassificationFlag_Boot)
			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
		else
			pm_type = POWER_STATE_TYPE_DEFAULT;
		break;
	}

	return pm_type;
}

static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
		return -EOPNOTSUPP;

	if (mode == U32_MAX)
		return -EINVAL;

	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);

	return 0;
}

static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
		return -EOPNOTSUPP;

	if (!fan_mode)
		return -EINVAL;

	*fan_mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
	return 0;
}

static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
		return -EOPNOTSUPP;

	if (speed == U32_MAX)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
}

static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
}

static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
		return -EOPNOTSUPP;

	if (!rpm)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
}

static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
		return -EOPNOTSUPP;

	if (rpm == U32_MAX)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
}

static int pp_dpm_get_pp_num_states(void *handle,
				    struct pp_states_info *data)
{
	struct pp_hwmgr *hwmgr = handle;
	int i;

	memset(data, 0, sizeof(*data));

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->ps)
		return -EINVAL;

	data->nums = hwmgr->num_ps;

	for (i = 0; i < hwmgr->num_ps; i++) {
		/* hwmgr->ps is a flat array with a per-ASIC stride of ps_size */
		struct pp_power_state *state = (struct pp_power_state *)
				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Battery:
			data->states[i] = POWER_STATE_TYPE_BATTERY;
			break;
		case PP_StateUILabel_Balanced:
			data->states[i] = POWER_STATE_TYPE_BALANCED;
			break;
		case PP_StateUILabel_Performance:
			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
			break;
		default:
			if (state->classification.flags & PP_StateClassificationFlag_Boot)
				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
			else
				data->states[i] = POWER_STATE_TYPE_DEFAULT;
		}
	}
	return 0;
}

static int pp_dpm_get_pp_table(void *handle, char **table)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
		return -EINVAL;

	*table = (char *)hwmgr->soft_pp_table;
	return hwmgr->soft_pp_table_size;
}
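
/*
 * Full reset of the powerplay stack: take the hardware down, bring it
 * back up and replay the COMPLETE_INIT task. Used after a user-supplied
 * pp table has been installed.
 */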
static int amd_powerplay_reset(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret;

	ret = hwmgr_hw_fini(hwmgr);
	if (ret)
		return ret;

	ret = hwmgr_hw_init(hwmgr);
	if (ret)
		return ret;

	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
}

static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -ENOMEM;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hardcode_pp_table) {
		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
						   hwmgr->soft_pp_table_size,
						   GFP_KERNEL);
		if (!hwmgr->hardcode_pp_table)
			return ret;
	}

	memcpy(hwmgr->hardcode_pp_table, buf, size);

	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;

	ret = amd_powerplay_reset(handle);
	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->avfs_control)
		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);

	return ret;
}

static int pp_dpm_force_clock_level(void *handle,
				    enum pp_clock_type type, uint32_t mask)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
}

static int pp_dpm_emit_clock_levels(void *handle,
				    enum pp_clock_type type,
				    char *buf,
				    int *offset)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (!hwmgr->hwmgr_func->emit_clock_levels)
		return -ENOENT;

	return hwmgr->hwmgr_func->emit_clock_levels(hwmgr, type, buf, offset);
}

static int pp_dpm_print_clock_levels(void *handle,
				     enum pp_clock_type type, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
}

static int pp_dpm_get_sclk_od(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
}

static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
}

static int pp_dpm_get_mclk_od(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
}

static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
}
static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !value)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk * 100;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk * 100;
		return 0;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk_peak * 100;
		return 0;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk_peak * 100;
		return 0;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
		return 0;
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
		return 0;
	default:
		return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
	}
}

static struct amd_vce_state *
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return NULL;

	if (idx < hwmgr->num_vce_state_tables)
		return &hwmgr->vce_states[idx];
	return NULL;
}

static int pp_get_power_profile_mode(void *handle, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
		return -EOPNOTSUPP;
	if (!buf)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}

static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode)
		return -EOPNOTSUPP;

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("power profile setting is for manual dpm mode only.\n");
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
}

static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
		return 0;

	return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
}

static int pp_odn_edit_dpm_table(void *handle, enum PP_OD_DPM_TABLE_COMMAND type,
				 long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}

static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->set_mp1_state)
		return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);

	return 0;
}
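
/*
 * Active workload profiles are tracked as a bitmask ordered by per-ASIC
 * priority (workload_prority[]); the index of the highest set bit, as
 * computed by fls(), selects the workload_setting[] entry that gets
 * programmed next.
 */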
static int pp_dpm_switch_power_profile(void *handle,
				       enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload[1];
	uint32_t index;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	if (!en) {
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload[0] = hwmgr->workload_setting[index];
	} else {
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload[0] = hwmgr->workload_setting[index];
	}

	if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
	    hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
		if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en))
			return -EINVAL;
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);

	return 0;
}
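
/*
 * Power limit handling: a limit of 0 selects the ASIC default. With
 * overdrive enabled, the default may be exceeded by up to TDPODLimit
 * percent, which also caps what userspace may request.
 */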
static int pp_set_power_limit(void *handle, uint32_t limit)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t max_power_limit;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (limit == 0)
		limit = hwmgr->default_power_limit;

	max_power_limit = hwmgr->default_power_limit;
	if (hwmgr->od_enabled) {
		max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
		max_power_limit /= 100;
	}

	if (limit > max_power_limit)
		return -EINVAL;

	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	hwmgr->power_limit = limit;
	return 0;
}

static int pp_get_power_limit(void *handle, uint32_t *limit,
			      enum pp_power_limit_level pp_limit_level,
			      enum pp_power_type power_type)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !limit)
		return -EINVAL;

	if (power_type != PP_PWR_TYPE_SUSTAINED)
		return -EOPNOTSUPP;

	switch (pp_limit_level) {
	case PP_PWR_LIMIT_CURRENT:
		*limit = hwmgr->power_limit;
		break;
	case PP_PWR_LIMIT_DEFAULT:
		*limit = hwmgr->default_power_limit;
		break;
	case PP_PWR_LIMIT_MAX:
		*limit = hwmgr->default_power_limit;
		if (hwmgr->od_enabled) {
			*limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
			*limit /= 100;
		}
		break;
	case PP_PWR_LIMIT_MIN:
		*limit = 0;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static int pp_display_configuration_change(void *handle,
					   const struct amd_pp_display_configuration *display_config)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	phm_store_dal_configuration_data(hwmgr, display_config);
	return 0;
}

static int pp_get_display_power_level(void *handle,
				      struct amd_pp_simple_clock_info *output)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !output)
		return -EINVAL;

	return phm_get_dal_power_level(hwmgr, output);
}
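
/*
 * Collect the current engine/memory clock window and bus bandwidth for
 * DC. With power containment enabled, the containment limits are
 * reported instead of the activity-based ones.
 */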
static int pp_get_current_clocks(void *handle,
				 struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = { 0 };
	struct pp_clock_info hw_clocks;
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	phm_get_dal_power_level(hwmgr, &simple_clocks);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					 &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					 &hw_clocks, PHM_PerformanceLevelDesignation_Activity);

	if (ret) {
		pr_debug("Error in phm_get_clock_info\n");
		return -EINVAL;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks) == 0) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
	return 0;
}

static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (clocks == NULL)
		return -EINVAL;

	return phm_get_clock_by_type(hwmgr, type, clocks);
}

static int pp_get_clock_by_type_with_latency(void *handle,
					     enum amd_pp_clock_type type,
					     struct pp_clock_levels_with_latency *clocks)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !clocks)
		return -EINVAL;

	return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
}

static int pp_get_clock_by_type_with_voltage(void *handle,
					     enum amd_pp_clock_type type,
					     struct pp_clock_levels_with_voltage *clocks)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !clocks)
		return -EINVAL;

	return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
}

static int pp_set_watermarks_for_clocks_ranges(void *handle,
					       void *clock_ranges)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
		return -EINVAL;

	return phm_set_watermarks_for_clocks_ranges(hwmgr,
						    clock_ranges);
}

static int pp_display_clock_voltage_request(void *handle,
					    struct pp_display_clock_request *clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !clock)
		return -EINVAL;

	return phm_display_clock_voltage_request(hwmgr, clock);
}

static int pp_get_display_mode_validation_clocks(void *handle,
						 struct amd_pp_simple_clock_info *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !clocks)
		return -EINVAL;

	clocks->level = PP_DAL_POWERLEVEL_7;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
		ret = phm_get_max_high_clocks(hwmgr, clocks);

	return ret;
}

static int pp_dpm_powergate_mmhub(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
}

static int pp_dpm_powergate_gfx(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
}

static void pp_dpm_powergate_acp(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_acp == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
}

static void pp_dpm_powergate_sdma(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return;

	if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
}
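
/*
 * Dispatch powergating requests from the amdgpu core to the matching
 * per-IP powergate helper above.
 */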
static int pp_set_powergating_by_smu(void *handle,
				     uint32_t block_type, bool gate)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		pp_dpm_powergate_uvd(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		pp_dpm_powergate_vce(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_GMC:
		/*
		 * For now, this is only used on PICASSO.
		 * And only "gate" operation is supported.
		 */
		if (gate)
			pp_dpm_powergate_mmhub(handle);
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = pp_dpm_powergate_gfx(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_ACP:
		pp_dpm_powergate_acp(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		pp_dpm_powergate_sdma(handle, gate);
		break;
	default:
		break;
	}
	return ret;
}

static int pp_notify_smu_enable_pwe(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);

	return 0;
}

static int pp_enable_mgpu_fan_boost(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en ||
	    hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
		return 0;

	hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);

	return 0;
}

static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);

	return 0;
}

static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);

	return 0;
}

static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);

	return 0;
}

static int pp_set_active_display_count(void *handle, uint32_t count)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	return phm_set_active_display_count(hwmgr, count);
}
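
/*
 * BACO (Bus Active, Chip Off) support: not available on virtual
 * functions or when DPM has been disabled on the module command line.
 */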
static int pp_get_asic_baco_capability(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return false;

	if (!(hwmgr->not_vf && amdgpu_dpm) ||
	    !hwmgr->hwmgr_func->get_bamaco_support)
		return false;

	return hwmgr->hwmgr_func->get_bamaco_support(hwmgr);
}

static int pp_get_asic_baco_state(void *handle, int *state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
		return 0;

	hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);

	return 0;
}

static int pp_set_asic_baco_state(void *handle, int state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!(hwmgr->not_vf && amdgpu_dpm) ||
	    !hwmgr->hwmgr_func->set_asic_baco_state)
		return 0;

	hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);

	return 0;
}

static int pp_get_ppfeature_status(void *handle, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !buf)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
}

static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
}

static int pp_asic_reset_mode_2(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->asic_reset == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
}

static int pp_smu_i2c_bus_access(void *handle, bool acquire)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
}

static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
		return 0;

	hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);

	return 0;
}

static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
		return 0;

	hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);

	return 0;
}

static ssize_t pp_get_gpu_metrics(void *handle, void **table)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
		return -EOPNOTSUPP;

	return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
}

static int pp_gfx_state_change_set(void *handle, uint32_t state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);
	return 0;
}
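
/*
 * Expose the kernel mapping of the SMU private buffer (reserved in
 * pp_reserve_vram_for_smu() above) to callers that want to inspect it.
 */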
static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
	struct pp_hwmgr *hwmgr = handle;
	struct amdgpu_device *adev = hwmgr->adev;
	int err;

	if (!addr || !size)
		return -EINVAL;

	*addr = NULL;
	*size = 0;
	if (adev->pm.smu_prv_buffer) {
		err = amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
		if (err)
			return err;
		*size = adev->pm.smu_prv_buffer_size;
	}

	return 0;
}

static void pp_pm_compute_clocks(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct amdgpu_device *adev = hwmgr->adev;

	if (!adev->dc_enabled) {
		amdgpu_dpm_get_active_displays(adev);
		adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
		adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
		adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
		/*
		 * We have issues with mclk switching with refresh rates over
		 * 120 Hz on the non-DC code.
		 */
		if (adev->pm.pm_display_cfg.vrefresh > 120)
			adev->pm.pm_display_cfg.min_vblank_time = 0;

		pp_display_configuration_change(handle,
						&adev->pm.pm_display_cfg);
	}

	pp_dpm_dispatch_tasks(handle,
			      AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
			      NULL);
}
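
/* Dispatch table exported to amdgpu through adev->powerplay.pp_funcs. */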
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_pwm = pp_dpm_set_fan_speed_pwm,
	.get_fan_speed_pwm = pp_dpm_get_fan_speed_pwm,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.emit_clock_levels = pp_dpm_emit_clock_levels,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.set_powergating_by_smu = pp_set_powergating_by_smu,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.set_fine_grain_clk_vol = pp_set_fine_grain_clk_vol,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_mp1_state = pp_dpm_set_mp1_state,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
	/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
	.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
	.set_active_display_count = pp_set_active_display_count,
	.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
	.set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
	.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
	.get_asic_baco_capability = pp_get_asic_baco_capability,
	.get_asic_baco_state = pp_get_asic_baco_state,
	.set_asic_baco_state = pp_set_asic_baco_state,
	.get_ppfeature_status = pp_get_ppfeature_status,
	.set_ppfeature_status = pp_set_ppfeature_status,
	.asic_reset_mode_2 = pp_asic_reset_mode_2,
	.smu_i2c_bus_access = pp_smu_i2c_bus_access,
	.set_df_cstate = pp_set_df_cstate,
	.set_xgmi_pstate = pp_set_xgmi_pstate,
	.get_gpu_metrics = pp_get_gpu_metrics,
	.gfx_state_change_set = pp_gfx_state_change_set,
	.get_smu_prv_buf_details = pp_get_prv_buffer_details,
	.pm_compute_clocks = pp_pm_compute_clocks,
};