/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/reboot.h>
#include "amd_shared.h"
#include "power_state.h"
#include "amdgpu.h"
#include "hwmgr.h"
#include "amdgpu_dpm_internal.h"

static const struct amd_pm_funcs pp_dpm_funcs;

static int amd_powerplay_create(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr;

	if (adev == NULL)
		return -EINVAL;

	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
	if (hwmgr == NULL)
		return -ENOMEM;

	hwmgr->adev = adev;
	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
	hwmgr->device = amdgpu_cgs_create_device(adev);
	if (!hwmgr->device) {
		kfree(hwmgr);
		return -ENOMEM;
	}

	mutex_init(&hwmgr->msg_lock);
	hwmgr->chip_family = adev->family;
	hwmgr->chip_id = adev->asic_type;
	hwmgr->feature_mask = adev->pm.pp_feature;
	hwmgr->display_config = &adev->pm.pm_display_cfg;
	adev->powerplay.pp_handle = hwmgr;
	adev->powerplay.pp_funcs = &pp_dpm_funcs;
	return 0;
}

static void amd_powerplay_destroy(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	mutex_destroy(&hwmgr->msg_lock);

	kfree(hwmgr->hardcode_pp_table);
	hwmgr->hardcode_pp_table = NULL;

	kfree(hwmgr);
	hwmgr = NULL;
}

static int pp_early_init(struct amdgpu_ip_block *ip_block)
{
	int ret;
	struct amdgpu_device *adev = ip_block->adev;

	ret = amd_powerplay_create(adev);
	if (ret != 0)
		return ret;

	ret = hwmgr_early_init(adev->powerplay.pp_handle);
	if (ret)
		return -EINVAL;

	return 0;
}

static void pp_swctf_delayed_work_handler(struct work_struct *work)
{
	struct pp_hwmgr *hwmgr =
		container_of(work, struct pp_hwmgr, swctf_delayed_work.work);
	struct amdgpu_device *adev = hwmgr->adev;
	struct amdgpu_dpm_thermal *range =
				&adev->pm.dpm.thermal;
	uint32_t gpu_temperature, size = sizeof(gpu_temperature);
	int ret;

	/*
	 * If the hotspot/edge temperature is confirmed to be below the SW CTF
	 * threshold after the enforced delay, nothing is done.
	 * Otherwise, a graceful shutdown is performed to prevent further damage.
	 */
	if (range->sw_ctf_threshold &&
	    hwmgr->hwmgr_func->read_sensor) {
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
						     AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
						     &gpu_temperature,
						     &size);
		/*
		 * On some legacy ASICs, retrieving the hotspot temperature
		 * may not be supported. Fall back to the edge temperature
		 * in that case.
		 */
		if (ret == -EOPNOTSUPP)
			ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
							     AMDGPU_PP_SENSOR_EDGE_TEMP,
							     &gpu_temperature,
							     &size);
		/* The sensor reports millidegrees Celsius; the threshold is in degrees. */
		if (!ret && gpu_temperature / 1000 < range->sw_ctf_threshold)
			return;
	}

	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
	orderly_poweroff(true);
}
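/*
 * The hooks below implement the standard amdgpu IP block lifecycle for the
 * powerplay SMU block. As a rough sketch (the exact sequence is driven by
 * the amdgpu core, not by this file), bring-up runs
 *
 *	pp_early_init() -> pp_sw_init() -> pp_hw_init() -> pp_late_init()
 *
 * and teardown runs
 *
 *	pp_hw_fini() -> pp_sw_fini() -> pp_late_fini()
 *
 * with pp_suspend()/pp_resume() invoked around system sleep transitions.
 */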
static int pp_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = hwmgr_sw_init(hwmgr);

	pr_debug("powerplay sw init %s\n", ret ? "failed" : "succeeded");

	if (!ret)
		INIT_DELAYED_WORK(&hwmgr->swctf_delayed_work,
				  pp_swctf_delayed_work_handler);

	return ret;
}

static int pp_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	hwmgr_sw_fini(hwmgr);

	amdgpu_ucode_release(&adev->pm.fw);

	return 0;
}

static int pp_hw_init(struct amdgpu_ip_block *ip_block)
{
	int ret = 0;
	struct amdgpu_device *adev = ip_block->adev;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	ret = hwmgr_hw_init(hwmgr);
	if (ret)
		pr_err("powerplay hw init failed\n");

	return ret;
}

static int pp_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle;

	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);

	hwmgr_hw_fini(hwmgr);

	return 0;
}

static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
	int r = -EINVAL;
	void *cpu_ptr = NULL;
	uint64_t gpu_addr;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &adev->pm.smu_prv_buffer,
				    &gpu_addr,
				    &cpu_ptr)) {
		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
		return;
	}

	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
					lower_32_bits((unsigned long)cpu_ptr),
					upper_32_bits((unsigned long)cpu_ptr),
					lower_32_bits(gpu_addr),
					upper_32_bits(gpu_addr),
					adev->pm.smu_prv_buffer_size);

	if (r) {
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
		adev->pm.smu_prv_buffer = NULL;
		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
	}
}
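/*
 * Note for pp_reserve_vram_for_smu() above: notify_cac_buffer_info() takes
 * the buffer addresses as 32-bit halves. For a (hypothetical) GPU address
 * of 0x0000000123456000, the call passes lower_32_bits() = 0x23456000 and
 * upper_32_bits() = 0x1.
 */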
static int pp_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (hwmgr && hwmgr->pm_en)
		hwmgr_handle_task(hwmgr,
				  AMD_PP_TASK_COMPLETE_INIT, NULL);
	if (adev->pm.smu_prv_buffer_size != 0)
		pp_reserve_vram_for_smu(adev);

	return 0;
}

static void pp_late_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (adev->pm.smu_prv_buffer)
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
	amd_powerplay_destroy(adev);
}

static bool pp_is_idle(struct amdgpu_ip_block *ip_block)
{
	return false;
}

static int pp_set_powergating_state(struct amdgpu_ip_block *ip_block,
				    enum amd_powergating_state state)
{
	return 0;
}

static int pp_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);

	return hwmgr_suspend(hwmgr);
}

static int pp_resume(struct amdgpu_ip_block *ip_block)
{
	struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle;

	return hwmgr_resume(hwmgr);
}

static int pp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
				    enum amd_clockgating_state state)
{
	return 0;
}

static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};

const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};
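/*
 * A minimal sketch of how this IP block version is expected to be consumed
 * elsewhere in amdgpu (registration call assumed, shown for illustration):
 *
 *	amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
 */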
/*
 * This interface is only supported on VI parts, because only smu7/8 can
 * help to load the gfx/sdma fw. The SMU must be enabled before the fw of
 * the other IPs is loaded, so start the SMU here to load the smu7 fw and
 * then the other IPs' fw.
 */
static int pp_dpm_load_fw(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
		return -EINVAL;

	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
		pr_err("fw load failed\n");
		return -EINVAL;
	}

	return 0;
}

static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}

static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}

static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
		}
	}
}
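/*
 * Worked example for pp_dpm_en_umd_pstate() above (levels illustrative):
 * with dpm_level == AMD_DPM_FORCED_LEVEL_AUTO, a request for
 * AMD_DPM_FORCED_LEVEL_PROFILE_PEAK saves AUTO in saved_dpm_level and sets
 * en_umd_pstate. A later AMD_DPM_FORCED_LEVEL_PROFILE_EXIT request is
 * rewritten back to the saved AUTO level and en_umd_pstate is cleared.
 */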
static int pp_dpm_force_performance_level(void *handle,
					  enum amd_dpm_forced_level level)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (level == hwmgr->dpm_level)
		return 0;

	pp_dpm_en_umd_pstate(hwmgr, &level);
	hwmgr->request_dpm_level = level;
	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);

	return 0;
}

static enum amd_dpm_forced_level pp_dpm_get_performance_level(
								void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	return hwmgr->dpm_level;
}

static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->get_sclk == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
}

static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->get_mclk == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
}

static void pp_dpm_powergate_vce(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}
	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
}

static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}
	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
}

static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
		enum amd_pm_state_type *user_state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	return hwmgr_handle_task(hwmgr, task_id, user_state);
}

static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct pp_power_state *state;
	enum amd_pm_state_type pm_type;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
		return -EINVAL;

	state = hwmgr->current_ps;

	switch (state->classification.ui_label) {
	case PP_StateUILabel_Battery:
		pm_type = POWER_STATE_TYPE_BATTERY;
		break;
	case PP_StateUILabel_Balanced:
		pm_type = POWER_STATE_TYPE_BALANCED;
		break;
	case PP_StateUILabel_Performance:
		pm_type = POWER_STATE_TYPE_PERFORMANCE;
		break;
	default:
		if (state->classification.flags & PP_StateClassificationFlag_Boot)
			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
		else
			pm_type = POWER_STATE_TYPE_DEFAULT;
		break;
	}

	return pm_type;
}

static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
		return -EOPNOTSUPP;

	if (mode == U32_MAX)
		return -EINVAL;

	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);

	return 0;
}

static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
		return -EOPNOTSUPP;

	if (!fan_mode)
		return -EINVAL;

	*fan_mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
	return 0;
}

static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
		return -EOPNOTSUPP;

	if (speed == U32_MAX)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
}

static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
}

static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
		return -EOPNOTSUPP;

	if (!rpm)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
}

static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
		return -EOPNOTSUPP;

	if (rpm == U32_MAX)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
}

static int pp_dpm_get_pp_num_states(void *handle,
		struct pp_states_info *data)
{
	struct pp_hwmgr *hwmgr = handle;
	int i;

	memset(data, 0, sizeof(*data));

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->ps)
		return -EINVAL;

	data->nums = hwmgr->num_ps;

	for (i = 0; i < hwmgr->num_ps; i++) {
		struct pp_power_state *state = (struct pp_power_state *)
				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Battery:
			data->states[i] = POWER_STATE_TYPE_BATTERY;
			break;
		case PP_StateUILabel_Balanced:
			data->states[i] = POWER_STATE_TYPE_BALANCED;
			break;
		case PP_StateUILabel_Performance:
			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
			break;
		default:
			if (state->classification.flags & PP_StateClassificationFlag_Boot)
				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
			else
				data->states[i] = POWER_STATE_TYPE_DEFAULT;
		}
	}
	return 0;
}

static int pp_dpm_get_pp_table(void *handle, char **table)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
		return -EINVAL;

	*table = (char *)hwmgr->soft_pp_table;
	return hwmgr->soft_pp_table_size;
}
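/*
 * Rough sketch of the pp table round trip driven through
 * pp_dpm_get_pp_table() above and pp_dpm_set_pp_table() below (buffer
 * handling is illustrative, not a verbatim caller):
 *
 *	char *table;
 *	int size = pp_dpm_get_pp_table(handle, &table);
 *
 *	if (size > 0) {
 *		modify a private copy of the "size" bytes at "table",
 *		then commit it with:
 *		pp_dpm_set_pp_table(handle, table, size);
 *	}
 */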
static int amd_powerplay_reset(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret;

	ret = hwmgr_hw_fini(hwmgr);
	if (ret)
		return ret;

	ret = hwmgr_hw_init(hwmgr);
	if (ret)
		return ret;

	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
}

static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -ENOMEM;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hardcode_pp_table) {
		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
						   hwmgr->soft_pp_table_size,
						   GFP_KERNEL);
		if (!hwmgr->hardcode_pp_table)
			return ret;
	}

	memcpy(hwmgr->hardcode_pp_table, buf, size);

	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;

	ret = amd_powerplay_reset(handle);
	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->avfs_control)
		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);

	return ret;
}

static int pp_dpm_force_clock_level(void *handle,
		enum pp_clock_type type, uint32_t mask)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
}

static int pp_dpm_emit_clock_levels(void *handle,
				    enum pp_clock_type type,
				    char *buf,
				    int *offset)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (!hwmgr->hwmgr_func->emit_clock_levels)
		return -ENOENT;

	return hwmgr->hwmgr_func->emit_clock_levels(hwmgr, type, buf, offset);
}

static int pp_dpm_print_clock_levels(void *handle,
		enum pp_clock_type type, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
}

static int pp_dpm_get_sclk_od(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
}

static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
}

static int pp_dpm_get_mclk_od(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
}

static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
}

static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !value)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk * 100;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk * 100;
		return 0;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk_peak * 100;
		return 0;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk_peak * 100;
		return 0;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
		return 0;
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
		return 0;
	default:
		return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
	}
}
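/*
 * Minimal usage sketch for pp_dpm_read_sensor() above (sensor id chosen
 * for illustration):
 *
 *	uint32_t value;
 *	int size = sizeof(value);
 *
 *	if (!pp_dpm_read_sensor(handle, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
 *				&value, &size))
 *		"value" now holds the minimum fan speed in RPM.
 */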
static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return NULL;

	if (idx < hwmgr->num_vce_state_tables)
		return &hwmgr->vce_states[idx];
	return NULL;
}

static int pp_get_power_profile_mode(void *handle, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
		return -EOPNOTSUPP;
	if (!buf)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}

static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode)
		return -EOPNOTSUPP;

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("power profile setting is for manual dpm mode only.\n");
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
}

static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
		return 0;

	return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
}

static int pp_odn_edit_dpm_table(void *handle, enum PP_OD_DPM_TABLE_COMMAND type,
				 long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}

static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->set_mp1_state)
		return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);

	return 0;
}

static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload[1];
	uint32_t index;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
		return -EINVAL;

	if (!en) {
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload[0] = hwmgr->workload_setting[index];
	} else {
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload[0] = hwmgr->workload_setting[index];
	}

	if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
	    hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
		if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en))
			return -EINVAL;
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);

	return 0;
}
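/*
 * Worked example for the workload mask handling in
 * pp_dpm_switch_power_profile() above (priority values are hypothetical):
 * if workload_prority[type] == 2, enabling that profile with
 * workload_mask == 0b0001 yields 0b0101; fls() returns 3, so index 2 picks
 * workload_setting[2]. Disabling it restores 0b0001, fls() returns 1 and
 * workload_setting[0] is reapplied.
 */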
static int pp_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t max_power_limit;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (limit == 0)
		limit = hwmgr->default_power_limit;

	max_power_limit = hwmgr->default_power_limit;
	if (hwmgr->od_enabled) {
		max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
		max_power_limit /= 100;
	}

	if (limit > max_power_limit)
		return -EINVAL;

	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	hwmgr->power_limit = limit;
	return 0;
}

static int pp_get_power_limit(void *handle, uint32_t *limit,
			      enum pp_power_limit_level pp_limit_level,
			      enum pp_power_type power_type)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !limit)
		return -EINVAL;

	if (power_type != PP_PWR_TYPE_SUSTAINED)
		return -EOPNOTSUPP;

	switch (pp_limit_level) {
	case PP_PWR_LIMIT_CURRENT:
		*limit = hwmgr->power_limit;
		break;
	case PP_PWR_LIMIT_DEFAULT:
		*limit = hwmgr->default_power_limit;
		break;
	case PP_PWR_LIMIT_MAX:
		*limit = hwmgr->default_power_limit;
		if (hwmgr->od_enabled) {
			*limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
			*limit /= 100;
		}
		break;
	case PP_PWR_LIMIT_MIN:
		*limit = 0;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
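/*
 * Overdrive headroom arithmetic used by pp_set_power_limit() and
 * pp_get_power_limit() above, with hypothetical numbers: for a default
 * power limit of 220 W and TDPODLimit == 20 (percent), the maximum
 * settable limit becomes 220 * (100 + 20) / 100 = 264 W.
 */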
static int pp_display_configuration_change(void *handle,
		const struct amd_pp_display_configuration *display_config)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	phm_store_dal_configuration_data(hwmgr, display_config);
	return 0;
}

static int pp_get_display_power_level(void *handle,
		struct amd_pp_simple_clock_info *output)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !output)
		return -EINVAL;

	return phm_get_dal_power_level(hwmgr, output);
}

static int pp_get_current_clocks(void *handle,
		struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = { 0 };
	struct pp_clock_info hw_clocks;
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	phm_get_dal_power_level(hwmgr, &simple_clocks);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

	if (ret) {
		pr_debug("Error in phm_get_clock_info\n");
		return -EINVAL;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks) == 0) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
	return 0;
}

static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (clocks == NULL)
		return -EINVAL;

	return phm_get_clock_by_type(hwmgr, type, clocks);
}

static int pp_get_clock_by_type_with_latency(void *handle,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_latency *clocks)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !clocks)
		return -EINVAL;

	return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
}

static int pp_get_clock_by_type_with_voltage(void *handle,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_voltage *clocks)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !clocks)
		return -EINVAL;

	return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
}

static int pp_set_watermarks_for_clocks_ranges(void *handle,
		void *clock_ranges)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
		return -EINVAL;

	return phm_set_watermarks_for_clocks_ranges(hwmgr,
						    clock_ranges);
}

static int pp_display_clock_voltage_request(void *handle,
		struct pp_display_clock_request *clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !clock)
		return -EINVAL;

	return phm_display_clock_voltage_request(hwmgr, clock);
}

static int pp_get_display_mode_validation_clocks(void *handle,
		struct amd_pp_simple_clock_info *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !clocks)
		return -EINVAL;

	clocks->level = PP_DAL_POWERLEVEL_7;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
		ret = phm_get_max_high_clocks(hwmgr, clocks);

	return ret;
}
static int pp_dpm_powergate_mmhub(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
}

static int pp_dpm_powergate_gfx(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
}

static void pp_dpm_powergate_acp(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_acp == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
}

static void pp_dpm_powergate_sdma(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return;

	if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
}

static int pp_set_powergating_by_smu(void *handle,
				     uint32_t block_type,
				     bool gate,
				     int inst)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		pp_dpm_powergate_uvd(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		pp_dpm_powergate_vce(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_GMC:
		/*
		 * For now, this is only used on PICASSO.
		 * And only the "gate" operation is supported.
		 */
		if (gate)
			pp_dpm_powergate_mmhub(handle);
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = pp_dpm_powergate_gfx(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_ACP:
		pp_dpm_powergate_acp(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		pp_dpm_powergate_sdma(handle, gate);
		break;
	default:
		break;
	}
	return ret;
}

static int pp_notify_smu_enable_pwe(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);

	return 0;
}

static int pp_enable_mgpu_fan_boost(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en ||
	    hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
		return 0;

	hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);

	return 0;
}

static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);

	return 0;
}

static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);

	return 0;
}

static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);

	return 0;
}

static int pp_set_active_display_count(void *handle, uint32_t count)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	return phm_set_active_display_count(hwmgr, count);
}

static int pp_get_asic_baco_capability(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return false;

	if (!(hwmgr->not_vf && amdgpu_dpm) ||
	    !hwmgr->hwmgr_func->get_bamaco_support)
		return false;

	return hwmgr->hwmgr_func->get_bamaco_support(hwmgr);
}

static int pp_get_asic_baco_state(void *handle, int *state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
		return 0;

	hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);

	return 0;
}

static int pp_set_asic_baco_state(void *handle, int state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;
	if (!(hwmgr->not_vf && amdgpu_dpm) ||
	    !hwmgr->hwmgr_func->set_asic_baco_state)
		return 0;

	hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);

	return 0;
}

static int pp_get_ppfeature_status(void *handle, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !buf)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
}

static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
}

static int pp_asic_reset_mode_2(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->asic_reset == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
}

static int pp_smu_i2c_bus_access(void *handle, bool acquire)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
}

static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
		return 0;

	hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);

	return 0;
}

static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
		return 0;

	hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);

	return 0;
}

static ssize_t pp_get_gpu_metrics(void *handle, void **table)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
		return -EOPNOTSUPP;

	return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
}

static int pp_gfx_state_change_set(void *handle, uint32_t state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);
	return 0;
}

static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
	struct pp_hwmgr *hwmgr = handle;
	struct amdgpu_device *adev = hwmgr->adev;
	int err;

	if (!addr || !size)
		return -EINVAL;

	*addr = NULL;
	*size = 0;
	if (adev->pm.smu_prv_buffer) {
		err = amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
		if (err)
			return err;
		*size = adev->pm.smu_prv_buffer_size;
	}

	return 0;
}
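/*
 * Note for pp_get_prv_buffer_details() above: amdgpu_bo_kmap() returns a
 * CPU mapping of the SMU private buffer. The buffer itself is created in
 * pp_reserve_vram_for_smu() and released in pp_late_fini(), so callers
 * only borrow the pointer and size; they must not free either themselves.
 */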
static void pp_pm_compute_clocks(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct amdgpu_device *adev = hwmgr->adev;

	if (!adev->dc_enabled) {
		amdgpu_dpm_get_display_cfg(adev);
		pp_display_configuration_change(handle,
						&adev->pm.pm_display_cfg);
	}

	pp_dpm_dispatch_tasks(handle,
			      AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
			      NULL);
}
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_pwm = pp_dpm_set_fan_speed_pwm,
	.get_fan_speed_pwm = pp_dpm_get_fan_speed_pwm,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.emit_clock_levels = pp_dpm_emit_clock_levels,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.set_powergating_by_smu = pp_set_powergating_by_smu,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.set_fine_grain_clk_vol = pp_set_fine_grain_clk_vol,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_mp1_state = pp_dpm_set_mp1_state,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
	/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
	.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
	.set_active_display_count = pp_set_active_display_count,
	.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
	.set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
	.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
	.get_asic_baco_capability = pp_get_asic_baco_capability,
	.get_asic_baco_state = pp_get_asic_baco_state,
	.set_asic_baco_state = pp_set_asic_baco_state,
	.get_ppfeature_status = pp_get_ppfeature_status,
	.set_ppfeature_status = pp_set_ppfeature_status,
	.asic_reset_mode_2 = pp_asic_reset_mode_2,
	.smu_i2c_bus_access = pp_smu_i2c_bus_access,
	.set_df_cstate = pp_set_df_cstate,
	.set_xgmi_pstate = pp_set_xgmi_pstate,
	.get_gpu_metrics = pp_get_gpu_metrics,
	.gfx_state_change_set = pp_gfx_state_change_set,
	.get_smu_prv_buf_details = pp_get_prv_buffer_details,
	.pm_compute_clocks = pp_pm_compute_clocks,
};
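/*
 * Everything above is consumed through this amd_pm_funcs table: amdgpu
 * core code dispatches via adev->powerplay.pp_funcs rather than calling
 * the static functions directly. A minimal sketch (error handling
 * omitted):
 *
 *	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 *	void *handle = adev->powerplay.pp_handle;
 *	uint32_t sclk = 0;
 *
 *	if (pp_funcs && pp_funcs->get_sclk)
 *		sclk = pp_funcs->get_sclk(handle, false);
 */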