/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/power_supply.h>
#include <linux/reboot.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "vangogh_ppt.h"
#include "aldebaran_ppt.h"
#include "yellow_carp_ppt.h"
#include "cyan_skillfish_ppt.h"
#include "smu_v13_0_0_ppt.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_5_ppt.h"
#include "smu_v13_0_6_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "smu_v14_0_0_ppt.h"
#include "smu_v14_0_2_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

static const struct amd_pm_funcs swsmu_pm_funcs;
static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id);
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(void *handle, u32 value);
static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
static void smu_power_profile_mode_get(struct smu_context *smu,
				       enum PP_SMC_POWER_PROFILE profile_mode);
static void smu_power_profile_mode_put(struct smu_context *smu,
				       enum PP_SMC_POWER_PROFILE profile_mode);
static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type);
static int smu_od_edit_dpm_table(void *handle,
				 enum PP_OD_DPM_TABLE_COMMAND type,
				 long *input, uint32_t size);

static int smu_sys_get_pp_feature_mask(void *handle,
				       char *buf)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_get_pp_feature_mask(smu, buf);
}

static int smu_sys_set_pp_feature_mask(void *handle,
				       uint64_t new_mask)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_pp_feature_mask(smu, new_mask);
}

int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
{
	if (!smu->ppt_funcs->set_gfx_off_residency)
		return -EINVAL;

	return smu_set_gfx_off_residency(smu, value);
}

int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_residency)
		return -EINVAL;

	return smu_get_gfx_off_residency(smu, value);
}

int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_entrycount)
		return -EINVAL;

	return smu_get_gfx_off_entrycount(smu, value);
}

int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
{
	if (!smu->ppt_funcs->get_gfx_off_status)
		return -EINVAL;

	*value = smu_get_gfx_off_status(smu);

	return 0;
}

int smu_set_soft_freq_range(struct smu_context *smu,
			    enum pp_clock_type type,
			    uint32_t min,
			    uint32_t max)
{
	enum smu_clk_type clk_type;
	int ret = 0;

	clk_type = smu_convert_to_smuclk(type);
	if (clk_type == SMU_CLK_COUNT)
		return -EINVAL;

	if (smu->ppt_funcs->set_soft_freq_limited_range)
		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
								  clk_type,
								  min,
								  max,
								  false);

	return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{
	int ret = -ENOTSUPP;

	if (!min && !max)
		return -EINVAL;

	if (smu->ppt_funcs->get_dpm_ultimate_freq)
		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
							    clk_type,
							    min,
							    max);

	return ret;
}

int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
		ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
		if (ret)
			dev_err(adev->dev, "Failed to enable gfx imu!\n");
	}
	return ret;
}

static u32 smu_get_mclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static u32 smu_get_sclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static int smu_set_gfx_imu_enable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
		return 0;

	return smu_set_gfx_power_up_by_imu(smu);
}

static bool is_vcn_enabled(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_VCN ||
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_JPEG) &&
		    !adev->ip_blocks[i].status.valid)
			return false;
	}

	return true;
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable,
				  int inst)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	/*
	 * don't poweron vcn/jpeg when they are skipped.
	 */
	if (!is_vcn_enabled(smu->adev))
		return 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

	if (atomic_read(&power_gate->vcn_gated[inst]) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, inst);
	if (!ret)
		atomic_set(&power_gate->vcn_gated[inst], !enable);

	return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!is_vcn_enabled(smu->adev))
		return 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

	return ret;
}

static int smu_dpm_set_vpe_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vpe_enable)
		return 0;

	if (atomic_read(&power_gate->vpe_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vpe_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vpe_gated, !enable);

	return ret;
}

static int smu_dpm_set_isp_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret;

	if (!smu->ppt_funcs->dpm_set_isp_enable)
		return 0;

	if (atomic_read(&power_gate->isp_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_isp_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->isp_gated, !enable);

	return ret;
}

static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
				       bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->adev->enable_umsch_mm)
		return 0;

	if (!smu->ppt_funcs->dpm_set_umsch_mm_enable)
		return 0;

	if (atomic_read(&power_gate->umsch_mm_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_umsch_mm_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->umsch_mm_gated, !enable);

	return ret;
}

static int smu_set_mall_enable(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->ppt_funcs->set_mall_enable)
		return 0;

	ret = smu->ppt_funcs->set_mall_enable(smu);

	return ret;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @handle: smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate: to power gate if true, ungate otherwise
 * @inst: the instance of the IP block to power gate/ungate
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP block(gfx/sdma/vcn/uvd/vce).
 *    This is guarded to be race condition free by the caller.
 * 2. Or get called on user setting request of power_dpm_force_performance_level.
 *    Under this case, the smu->mutex lock protection is already enforced on
 *    the parent API smu_force_performance_level of the call path.
 */
static int smu_dpm_set_power_gate(void *handle,
				  uint32_t block_type,
				  bool gate,
				  int inst)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
		dev_WARN(smu->adev->dev,
			 "SMU uninitialized but power %s requested for %u!\n",
			 gate ? "gate" : "ungate", block_type);
		return -EOPNOTSUPP;
	}

	switch (block_type) {
	/*
	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
	 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
	 */
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate, inst);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN instance %d!\n",
				gate ? "gate" : "ungate", inst);
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
				gate ? "enable" : "disable");
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_VPE:
		ret = smu_dpm_set_vpe_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_ISP:
		ret = smu_dpm_set_isp_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s ISP!\n",
				gate ? "gate" : "ungate");
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		return -EINVAL;
	}

	return ret;
}

/**
 * smu_set_user_clk_dependencies - set user profile clock dependencies
 *
 * @smu: smu_context pointer
 * @clk: enum smu_clk_type type
 *
 * Enable/Disable the clock dependency for the @clk type.
 */
static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
{
	if (smu->adev->in_suspend)
		return;

	if (clk == SMU_MCLK) {
		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_FCLK) {
		/* MCLK takes precedence over FCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_SOCCLK) {
		/* MCLK takes precedence over SOCCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
	} else
		/* Add clk dependencies here, if any */
		return;
}

/**
 * smu_restore_dpm_user_profile - reinstate user dpm profile
 *
 * @smu: smu_context pointer
 *
 * Restore the saved user power configurations include power limit,
 * clock frequencies, fan control mode and fan speed.
 */
static void smu_restore_dpm_user_profile(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->adev->in_suspend)
		return;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return;

	/* Enable restore flag */
	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;

	/* set the user dpm power limits */
	for (int i = SMU_DEFAULT_PPT_LIMIT; i < SMU_LIMIT_TYPE_COUNT; i++) {
		if (!smu->user_dpm_profile.power_limits[i])
			continue;
		ret = smu_set_power_limit(smu, i,
					  smu->user_dpm_profile.power_limits[i]);
		if (ret)
			dev_err(smu->adev->dev, "Failed to set %d power limit value\n", i);
	}

	/* set the user dpm clock configurations */
	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		enum smu_clk_type clk_type;

		for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
			/*
			 * Iterate over smu clk type and force the saved user clk
			 * configs, skip if clock dependency is enabled
			 */
			if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
			    smu->user_dpm_profile.clk_mask[clk_type]) {
				ret = smu_force_smuclk_levels(smu, clk_type,
						smu->user_dpm_profile.clk_mask[clk_type]);
				if (ret)
					dev_err(smu->adev->dev,
						"Failed to set clock type = %d\n", clk_type);
			}
		}
	}

	/* set the user dpm fan configurations */
	if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
	    smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
		ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
		if (ret != -EOPNOTSUPP) {
			smu->user_dpm_profile.fan_speed_pwm = 0;
			smu->user_dpm_profile.fan_speed_rpm = 0;
			smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
			dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
		}

		if (smu->user_dpm_profile.fan_speed_pwm) {
			ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
			if (ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
		}

		if (smu->user_dpm_profile.fan_speed_rpm) {
			ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
			if (ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
		}
	}

	/* Restore user customized OD settings */
	if (smu->user_dpm_profile.user_od) {
		if (smu->ppt_funcs->restore_user_od_settings) {
			ret = smu->ppt_funcs->restore_user_od_settings(smu);
			if (ret)
				dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
		}
	}

	/* Disable restore flag */
	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
}

static int smu_get_power_num_states(void *handle,
				    struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* not support power state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
	/* vega20 is 11.0.2, but it's supported via the powerplay code */
	if (adev->asic_type == CHIP_VEGA20)
		return false;

	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) &&
	    amdgpu_device_ip_is_valid(adev, AMD_IP_BLOCK_TYPE_SMC))
		return true;

	return false;
}

bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
		return false;

	return true;
}

int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg,
			    uint32_t param, uint32_t *read_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (smu->ppt_funcs && smu->ppt_funcs->ras_send_msg)
		ret = smu->ppt_funcs->ras_send_msg(smu, msg, param, read_arg);

	return ret;
}

static int smu_sys_get_pp_table(void *handle,
				char **table)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}

static int smu_sys_set_pp_table(void *handle,
				const char *buf,
				size_t size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size not matched !\n");
		return -EIO;
	}

	if (!smu_table->hardcode_pptable || smu_table->power_play_table_size < size) {
		kfree(smu_table->hardcode_pptable);
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
		if (!smu_table->hardcode_pptable)
			return -ENOMEM;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * Special hw_fini action(for Navi1x, the DPMs disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

	return ret;
}

static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
	int ret = 0;

	/*
	 * With SCPM enabled, the allowed featuremasks setting(via
	 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
	 * That means there is no way to let PMFW know the settings below.
	 * Thus, we just assume all the features are allowed under
	 * such scenario.
	 */
	if (smu->adev->scpm_enabled) {
		bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
		return 0;
	}

	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
		navi10_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		renoir_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 2):
		vangogh_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 8):
		yellow_carp_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 11):
		smu_v13_0_4_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 5):
		smu_v13_0_5_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 8):
		cyan_skillfish_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 2):
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case IP_VERSION(13, 0, 2):
		aldebaran_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 10):
		smu_v13_0_0_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 14):
	case IP_VERSION(13, 0, 12):
		smu_v13_0_6_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 7):
		smu_v13_0_7_set_ppt_funcs(smu);
		break;
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
	case IP_VERSION(14, 0, 4):
	case IP_VERSION(14, 0, 5):
		smu_v14_0_0_set_ppt_funcs(smu);
		break;
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
		smu_v14_0_2_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu;
	int r;

	smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
	if (!smu)
		return -ENOMEM;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	smu->smu_baco.state = SMU_BACO_STATE_NONE;
	smu->smu_baco.platform_support = false;
	smu->smu_baco.maco_support = false;
	smu->user_dpm_profile.fan_mode = -1;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_UNKNOWN;

	mutex_init(&smu->message_lock);

	adev->powerplay.pp_handle = smu;
	adev->powerplay.pp_funcs = &swsmu_pm_funcs;

	r = smu_set_funcs(adev);
	if (r)
		return r;
	return smu_init_microcode(smu);
}

static int
smu_set_default_dpm_table(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int vcn_gate[AMDGPU_MAX_VCN_INSTANCES], jpeg_gate, i;
	int ret = 0;

	if (!smu->ppt_funcs->set_default_dpm_table)
		return 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
			vcn_gate[i] = atomic_read(&power_gate->vcn_gated[i]);
	}
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
		jpeg_gate = atomic_read(&power_gate->jpeg_gated);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			ret = smu_dpm_set_vcn_enable(smu, true, i);
			if (ret)
				return ret;
		}
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		ret = smu_dpm_set_jpeg_enable(smu, true);
		if (ret)
			goto err_out;
	}

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
		smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
err_out:
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
			smu_dpm_set_vcn_enable(smu, !vcn_gate[i], i);
	}

	return ret;
}

static int smu_apply_default_config_table_settings(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_get_default_config_table_settings(smu,
						    &adev->pm.config_table);
	if (ret)
		return ret;

	return smu_set_config_table(smu, &adev->pm.config_table);
}

static int smu_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	smu_set_fine_grain_gfx_freq_parameters(smu);

	if (!smu->pm_enabled)
		return 0;

	ret = smu_post_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to post smu init!\n");
		return ret;
	}

	/*
	 * Explicitly notify PMFW the power mode the system is in, since
	 * the PMFW may boot the ASIC with a different mode.
	 * For those supporting ACDC switch via gpio, PMFW will
	 * handle the switch automatically. Driver involvement
	 * is unnecessary.
	 */
	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
	smu_set_ac_dc(smu);

	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
	    (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
		return 0;

	if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
		ret = smu_set_default_od_settings(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup default OD settings!\n");
			return ret;
		}
	}

	ret = smu_populate_umd_state_clk(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
		return ret;
	}

	ret = smu_get_asic_power_limits(smu,
					&smu->current_power_limit,
					&smu->default_power_limit,
					&smu->max_power_limit,
					&smu->min_power_limit);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;
	}

	if (!amdgpu_sriov_vf(adev))
		smu_get_unique_id(smu);

	smu_get_fan_parameters(smu);

	smu_handle_task(smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);

	ret = smu_apply_default_config_table_settings(smu);
	if (ret && (ret != -EOPNOTSUPP)) {
		dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
		return ret;
	}

	smu_restore_dpm_user_profile(smu);

	return 0;
}

static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		/* If one of the tables has VRAM domain restriction, keep it in
		 * VRAM
		 */
		if ((tables[i].domain &
		    (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) ==
			    AMDGPU_GEM_DOMAIN_VRAM)
			driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}

static void smu_update_gpu_addresses(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *pm_status_table = smu_table->tables + SMU_TABLE_PMSTATUSLOG;
	struct smu_table *driver_table = &(smu_table->driver_table);
	struct smu_table *dummy_read_1_table = &smu_table->dummy_read_1_table;

	if (pm_status_table->bo)
		pm_status_table->mc_address = amdgpu_bo_fb_aper_addr(pm_status_table->bo);
	if (driver_table->bo)
		driver_table->mc_address = amdgpu_bo_fb_aper_addr(driver_table->bo);
	if (dummy_read_1_table->bo)
		dummy_read_1_table->mc_address = amdgpu_bo_fb_aper_addr(dummy_read_1_table->bo);
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: amdgpu_device pointer
 *
 * This memory pool will be used for SMC use and msg SetSystemVirtualDramAddr
 * and DramLogSetDramAddr can notify it changed.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain =
		(adev->pm.smu_debug_mask & SMU_DEBUG_POOL_USE_VRAM) ?
			AMDGPU_GEM_DOMAIN_VRAM :
			AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		if (ret)
			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}

static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
		&smu_table->dummy_read_1_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!dummy_read_1_table->size)
		return 0;

	ret = amdgpu_bo_create_kernel(adev,
				      dummy_read_1_table->size,
				      dummy_read_1_table->align,
				      dummy_read_1_table->domain,
				      &dummy_read_1_table->bo,
				      &dummy_read_1_table->mc_address,
				      &dummy_read_1_table->cpu_addr);
	if (ret)
		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

	return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
		&smu_table->dummy_read_1_table;


	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
			      &dummy_read_1_table->mc_address,
			      &dummy_read_1_table->cpu_addr);

	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/**
	 * Create smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, and etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
		return ret;
	}

	/**
	 * Create smu_power_context structure, and allocate smu_dpm_context and
	 * context size to fill the smu_power_context data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu_init_power!\n");
		return ret;
	}

	/*
	 * allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_alloc_dummy_read_table(smu);
	if (ret)
		return ret;

	ret = smu_i2c_init(smu);
	if (ret)
		return ret;

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	smu_i2c_fini(smu);

	smu_free_dummy_read_table(smu);

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu_fini_power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n");
		return ret;
	}

	return 0;
}

static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}

static void smu_interrupt_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       interrupt_work);

	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
		smu->ppt_funcs->interrupt_work(smu);
}

static void smu_swctf_delayed_work_handler(struct work_struct *work)
{
	struct smu_context *smu =
		container_of(work, struct smu_context, swctf_delayed_work.work);
	struct smu_temperature_range *range =
		&smu->thermal_range;
	struct amdgpu_device *adev = smu->adev;
	uint32_t hotspot_tmp, size;

	/*
	 * If the hotspot temperature is confirmed as below SW CTF setting point
	 * after the delay enforced, nothing will be done.
	 * Otherwise, a graceful shutdown will be performed to prevent further damage.
	 */
	if (range->software_shutdown_temp &&
	    smu->ppt_funcs->read_sensor &&
	    !smu->ppt_funcs->read_sensor(smu,
					 AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					 &hotspot_tmp,
					 &size) &&
	    hotspot_tmp / 1000 < range->software_shutdown_temp)
		return;

	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
	orderly_poweroff(true);
}

static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
{
	struct smu_dpm_context *dpm_ctxt = &(smu->smu_dpm);
	struct smu_dpm_policy_ctxt *policy_ctxt;
	struct smu_dpm_policy *policy;

	policy = smu_get_pm_policy(smu, PP_PM_POLICY_XGMI_PLPD);
	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
		if (policy)
			policy->current_level = XGMI_PLPD_DEFAULT;
		return;
	}

	/* PMFW put PLPD into default policy after enabling the feature */
	if (smu_feature_is_enabled(smu,
				   SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT)) {
		if (policy)
			policy->current_level = XGMI_PLPD_DEFAULT;
	} else {
		policy_ctxt = dpm_ctxt->dpm_policies;
		if (policy_ctxt)
			policy_ctxt->policy_mask &=
				~BIT(PP_PM_POLICY_XGMI_PLPD);
	}
}

static void smu_init_power_profile(struct smu_context *smu)
{
	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN)
		smu->power_profile_mode =
			PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu_power_profile_mode_get(smu, smu->power_profile_mode);
}

void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id)
{
	struct smu_feature_cap *fea_cap = &smu->fea_cap;

	if (fea_id >= SMU_FEATURE_CAP_ID__COUNT)
		return;

	set_bit(fea_id, fea_cap->cap_map);
}

bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id)
{
	struct smu_feature_cap *fea_cap = &smu->fea_cap;

	if (fea_id >= SMU_FEATURE_CAP_ID__COUNT)
		return false;

	return test_bit(fea_id, fea_cap->cap_map);
}

static void smu_feature_cap_init(struct smu_context *smu)
{
	struct smu_feature_cap *fea_cap = &smu->fea_cap;

	bitmap_zero(fea_cap->cap_map, SMU_FEATURE_CAP_ID__COUNT);
}

static int smu_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int i, ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
	atomic64_set(&smu->throttle_int_counter, 0);
	smu->watermarks_bitmap = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1);
	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
	atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
	atomic_set(&smu->smu_power.power_gate.isp_gated, 1);
	atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);

	smu_init_power_profile(smu);
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	INIT_DELAYED_WORK(&smu->swctf_delayed_work,
			  smu_swctf_delayed_work_handler);

	smu_feature_cap_init(smu);

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");
		return ret;
	}

	/* get boot_values from vbios to set revision, gfxclk, and etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_init_pptable_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable firmware!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to register smc irq handler!\n");
		return ret;
	}

	/* If there is no way to query fan control mode, fan control is not supported */
	if (!smu->ppt_funcs->get_fan_control_mode)
		smu->adev->pm.no_fan = true;

	return 0;
}

static int smu_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw fini smc table!\n");
		return ret;
	}

	if (smu->custom_profile_params) {
		kfree(smu->custom_profile_params);
		smu->custom_profile_params = NULL;
	}

	smu_fini_microcode(smu);

	return 0;
}

static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_temperature_range *range =
		&smu->thermal_range;
	int ret = 0;

	if (!smu->ppt_funcs->get_thermal_temperature_range)
		return 0;

	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
	if (ret)
		return ret;

	adev->pm.dpm.thermal.min_temp = range->min;
	adev->pm.dpm.thermal.max_temp = range->max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

	return ret;
}

/**
 * smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges
 *
 * @smu: smu_context pointer
 *
 * Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling.
 * Returns 0 on success, error on failure.
 */
static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu)
{
	struct wbrf_ranges_in_out wbrf_exclusion = {0};
	struct freq_band_range *wifi_bands = wbrf_exclusion.band_list;
	struct amdgpu_device *adev = smu->adev;
	uint32_t num_of_wbrf_ranges = MAX_NUM_OF_WBRF_RANGES;
	uint64_t start, end;
	int ret, i, j;

	ret = amd_wbrf_retrieve_freq_band(adev->dev, &wbrf_exclusion);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve exclusion ranges!\n");
		return ret;
	}

	/*
	 * The exclusion ranges array we got might be filled with holes and duplicate
	 * entries. For example:
	 * {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500), (0, 0), (6117, 6189), (0, 0)...}
	 * We need to do some sortups to eliminate those holes and duplicate entries.
	 * Expected output: {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...}
	 */
	for (i = 0; i < num_of_wbrf_ranges; i++) {
		start = wifi_bands[i].start;
		end = wifi_bands[i].end;

		/* get the last valid entry to fill the intermediate hole */
		if (!start && !end) {
			for (j = num_of_wbrf_ranges - 1; j > i; j--)
				if (wifi_bands[j].start && wifi_bands[j].end)
					break;

			/* no valid entry left */
			if (j <= i)
				break;

			start = wifi_bands[i].start = wifi_bands[j].start;
			end = wifi_bands[i].end = wifi_bands[j].end;
			wifi_bands[j].start = 0;
			wifi_bands[j].end = 0;
			num_of_wbrf_ranges = j;
		}

		/* eliminate duplicate entries */
		for (j = i + 1; j < num_of_wbrf_ranges; j++) {
			if ((wifi_bands[j].start == start) && (wifi_bands[j].end == end)) {
				wifi_bands[j].start = 0;
				wifi_bands[j].end = 0;
			}
		}
	}

	/* Send the sorted wifi_bands to PMFW */
	ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
	/* Try to set the wifi_bands again */
	if (unlikely(ret == -EBUSY)) {
		mdelay(5);
		ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
	}

	return ret;
}

/**
 * smu_wbrf_event_handler - handle notify events
 *
 * @nb: notifier block
 * @action: event type
 * @_arg: event data
 *
 * Calls relevant amdgpu function in response to wbrf event
 * notification from kernel.
 */
static int smu_wbrf_event_handler(struct notifier_block *nb,
				  unsigned long action, void *_arg)
{
	struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier);

	switch (action) {
	case WBRF_CHANGED:
		schedule_delayed_work(&smu->wbrf_delayed_work,
				      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

/**
 * smu_wbrf_delayed_work_handler - callback on delayed work timer expired
 *
 * @work: struct work_struct pointer
 *
 * Flood is over and driver will consume the latest exclusion ranges.
 */
static void smu_wbrf_delayed_work_handler(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);

	smu_wbrf_handle_exclusion_ranges(smu);
}

/**
 * smu_wbrf_support_check - check wbrf support
 *
 * @smu: smu_context pointer
 *
 * Verifies the ACPI interface whether wbrf is supported.
 */
static void smu_wbrf_support_check(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf &&
			      acpi_amd_wbrf_supported_consumer(adev->dev);

	if (smu->wbrf_supported)
		dev_info(adev->dev, "RF interference mitigation is supported\n");
}

/**
 * smu_wbrf_init - init driver wbrf support
 *
 * @smu: smu_context pointer
 *
 * Verifies the AMD ACPI interfaces and registers with the wbrf
 * notifier chain if wbrf feature is supported.
 * Returns 0 on success, error on failure.
 */
static int smu_wbrf_init(struct smu_context *smu)
{
	int ret;

	if (!smu->wbrf_supported)
		return 0;

	INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler);

	smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
	ret = amd_wbrf_register_notifier(&smu->wbrf_notifier);
	if (ret)
		return ret;

	/*
	 * Some wifiband exclusion ranges may be already there
	 * before our driver loaded. To make sure our driver
	 * is aware of those exclusion ranges.
	 */
	schedule_delayed_work(&smu->wbrf_delayed_work,
			      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));

	return 0;
}

/**
 * smu_wbrf_fini - tear down driver wbrf support
 *
 * @smu: smu_context pointer
 *
 * Unregisters with the wbrf notifier chain.
 */
static void smu_wbrf_fini(struct smu_context *smu)
{
	if (!smu->wbrf_supported)
		return;

	amd_wbrf_unregister_notifier(&smu->wbrf_notifier);

	cancel_delayed_work_sync(&smu->wbrf_delayed_work);
}

static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	struct amdgpu_device *adev = smu->adev;
	uint8_t pcie_gen = 0, pcie_width = 0;
	uint64_t features_supported;
	int ret = 0;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 0, 12):
		if (adev->in_suspend && smu_is_dpm_running(smu)) {
			dev_info(adev->dev, "dpm has been enabled\n");
			ret = smu_system_features_control(smu, true);
			if (ret)
				dev_err(adev->dev, "Failed system features control!\n");
			return ret;
		}
		break;
	default:
		break;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret) {
		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
		return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
		return ret;
	}

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
		return ret;
	}

	/*
	 * Use msg SetSystemVirtualDramAddr and DramLogSetDramAddr can notify
	 * pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	/*
	 * It is assumed the pptable used before runpm is same as
	 * the one used afterwards. Thus, we can reuse the stored
	 * copy and do not need to resetup the pptable again.
	 */
	if (!adev->in_runpm) {
		ret = smu_setup_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup pptable!\n");
			return ret;
		}
	}

	/* smu_dump_pptable(smu); */

	/*
	 * With SCPM enabled, PSP is responsible for the PPTable transferring
	 * (to SMU). Driver involvement is not needed and permitted.
	 */
	if (!adev->scpm_enabled) {
		/*
		 * Copy pptable bo in the vram to smc with SMU MSGs such as
		 * SetDriverDramAddr and TransferTableDram2Smu.
		 */
		ret = smu_write_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
			return ret;
		}
	}

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	/* Enable UclkShadow on wbrf supported */
	if (smu->wbrf_supported) {
		ret = smu_enable_uclk_shadow(smu, true);
		if (ret) {
			dev_err(adev->dev, "Failed to enable UclkShadow feature to support wbrf!\n");
			return ret;
		}
	}

	/*
	 * With SCPM enabled, these actions(and relevant messages) are
	 * not needed and permitted.
	 */
	if (!adev->scpm_enabled) {
		ret = smu_feature_set_allowed_mask(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
			return ret;
		}
	}

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)
		pcie_gen = 4;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level.
	 * 0 is DPM0, and 1 is DPM1
	 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32)
		pcie_width = 7;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;
	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
	if (ret) {
		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
		return ret;
	}

	ret = smu_system_features_control(smu, true);
	if (ret) {
		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
		return ret;
	}

	smu_init_xgmi_plpd_mode(smu);

	ret = smu_feature_get_enabled_mask(smu, &features_supported);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
		return ret;
	}
	bitmap_copy(feature->supported,
		    (unsigned long *)&features_supported,
		    feature->feature_num);

	if (!smu_is_dpm_running(smu))
		dev_info(adev->dev, "dpm has been disabled\n");

	/*
	 * Set initialized values (get from vbios) to dpm tables context such as
	 * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
	 * type of clks.
	 */
	ret = smu_set_default_dpm_table(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
		return ret;
	}

	ret = smu_get_thermal_temperature_range(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
		return ret;
	}

	ret = smu_enable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to enable thermal alert!\n");
		return ret;
	}

	ret = smu_notify_display_change(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to notify display change!\n");
		return ret;
	}

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu,
					  smu->smu_table.boot_values.dcefclk / 100);
	if (ret) {
		dev_err(adev->dev, "Error setting min deepsleep dcefclk\n");
		return ret;
	}

	/* Init wbrf support.
	 * Properly setup the notifier */
	ret = smu_wbrf_init(smu);
	if (ret)
		dev_err(adev->dev, "Error during wbrf init call\n");

	return ret;
}

static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (amdgpu_virt_xgmi_migrate_enabled(adev))
		smu_update_gpu_addresses(smu);

	smu->smc_fw_state = SMU_FW_INIT;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	/*
	 * Send msg GetDriverIfVersion to check if the return value is equal
	 * with DRIVER_IF_VERSION of smc header.
	 */
	ret = smu_check_fw_version(smu);
	if (ret)
		return ret;

	return ret;
}

static int smu_hw_init(struct amdgpu_ip_block *ip_block)
{
	int i, ret;
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_multi_vf_mode(adev)) {
		smu->pm_enabled = false;
		return 0;
	}

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	/*
	 * Check whether wbrf is supported. This needs to be done
	 * before SMU setup starts since part of SMU configuration
	 * relies on this.
	 */
	smu_wbrf_support_check(smu);

	if (smu->is_apu) {
		ret = smu_set_gfx_imu_enable(smu);
		if (ret)
			return ret;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
			smu_dpm_set_vcn_enable(smu, true, i);
		smu_dpm_set_jpeg_enable(smu, true);
		smu_dpm_set_umsch_mm_enable(smu, true);
		smu_set_mall_enable(smu);
		smu_set_gfx_cgpg(smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	ret = smu_get_driver_allowed_feature_mask(smu);
	if (ret)
		return ret;

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	/*
	 * Move maximum sustainable clock retrieving here considering
	 * 1. It is not needed on resume(from S3).
	 * 2. DAL settings come between .hw_init and .late_init of SMU.
	 *    And DAL needs to know the maximum sustainable clocks. Thus
	 *    it cannot be put in .late_init().
	 */
	ret = smu_init_max_sustainable_clocks(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
		return ret;
	}

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is initialized successfully!\n");

	return 0;
}

static int smu_disable_dpms(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
	bool use_baco = !smu->is_apu &&
		((amdgpu_in_reset(adev) &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));

	/*
	 * For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features(disablement or others)
	 * properly on suspend/reset/unload. Driver involvement may cause some unexpected issues.
1992 */ 1993 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 1994 case IP_VERSION(13, 0, 0): 1995 case IP_VERSION(13, 0, 7): 1996 case IP_VERSION(13, 0, 10): 1997 case IP_VERSION(14, 0, 2): 1998 case IP_VERSION(14, 0, 3): 1999 return 0; 2000 default: 2001 break; 2002 } 2003 2004 /* 2005 * For custom pptable uploading, skip the DPM features 2006 * disable process on Navi1x ASICs. 2007 * - The gfx related features are under the control of 2008 * RLC on those ASICs. RLC reinitialization would be 2009 * needed to reenable them, which would take much more 2010 * effort. 2011 * 2012 * - SMU firmware can handle the DPM reenablement 2013 * properly. 2014 */ 2015 if (smu->uploading_custom_pp_table) { 2016 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 2017 case IP_VERSION(11, 0, 0): 2018 case IP_VERSION(11, 0, 5): 2019 case IP_VERSION(11, 0, 9): 2020 case IP_VERSION(11, 0, 7): 2021 case IP_VERSION(11, 0, 11): 2022 case IP_VERSION(11, 5, 0): 2023 case IP_VERSION(11, 5, 2): 2024 case IP_VERSION(11, 0, 12): 2025 case IP_VERSION(11, 0, 13): 2026 return 0; 2027 default: 2028 break; 2029 } 2030 } 2031 2032 /* 2033 * For Sienna_Cichlid, PMFW will handle the features disablement properly 2034 * on BACO entry. Driver involvement is unnecessary. 2035 */ 2036 if (use_baco) { 2037 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 2038 case IP_VERSION(11, 0, 7): 2039 case IP_VERSION(11, 0, 0): 2040 case IP_VERSION(11, 0, 5): 2041 case IP_VERSION(11, 0, 9): 2042 case IP_VERSION(13, 0, 7): 2043 return 0; 2044 default: 2045 break; 2046 } 2047 } 2048 2049 /* 2050 * For GFX11 and subsequent APUs, PMFW will handle the features disablement properly 2051 * for gpu reset and S0i3 cases. Driver involvement is unnecessary. 2052 */ 2053 if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) >= 11 && 2054 smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix)) 2055 return 0; 2056 2057 /* 2058 * For gpu reset, runpm and hibernation through BACO, 2059 * BACO feature has to be kept enabled. 2060 */ 2061 if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) { 2062 ret = smu_disable_all_features_with_exception(smu, 2063 SMU_FEATURE_BACO_BIT); 2064 if (ret) 2065 dev_err(adev->dev, "Failed to disable smu features except BACO.\n"); 2066 } else { 2067 /* DisableAllSmuFeatures message is not permitted with SCPM enabled */ 2068 if (!adev->scpm_enabled) { 2069 ret = smu_system_features_control(smu, false); 2070 if (ret) 2071 dev_err(adev->dev, "Failed to disable smu features.\n"); 2072 } 2073 } 2074 2075 /* Notify SMU that RLC is going to be off and stop the RLC/SMU interaction; 2076 * otherwise SMU will hang when interacting with RLC while RLC is halted. 2077 * This is a workaround for the Vangogh ASIC SMU hang issue.
2078 */ 2079 ret = smu_notify_rlc_state(smu, false); 2080 if (ret) { 2081 dev_err(adev->dev, "Fail to notify rlc status!\n"); 2082 return ret; 2083 } 2084 2085 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) && 2086 !((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) && 2087 !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop) 2088 adev->gfx.rlc.funcs->stop(adev); 2089 2090 return ret; 2091 } 2092 2093 static int smu_smc_hw_cleanup(struct smu_context *smu) 2094 { 2095 struct amdgpu_device *adev = smu->adev; 2096 int ret = 0; 2097 2098 smu_wbrf_fini(smu); 2099 2100 cancel_work_sync(&smu->throttling_logging_work); 2101 cancel_work_sync(&smu->interrupt_work); 2102 2103 ret = smu_disable_thermal_alert(smu); 2104 if (ret) { 2105 dev_err(adev->dev, "Fail to disable thermal alert!\n"); 2106 return ret; 2107 } 2108 2109 cancel_delayed_work_sync(&smu->swctf_delayed_work); 2110 2111 ret = smu_disable_dpms(smu); 2112 if (ret) { 2113 dev_err(adev->dev, "Fail to disable dpm features!\n"); 2114 return ret; 2115 } 2116 2117 return 0; 2118 } 2119 2120 static int smu_reset_mp1_state(struct smu_context *smu) 2121 { 2122 struct amdgpu_device *adev = smu->adev; 2123 int ret = 0; 2124 2125 if ((!adev->in_runpm) && (!adev->in_suspend) && 2126 (!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) == 2127 IP_VERSION(13, 0, 10) && 2128 !amdgpu_device_has_display_hardware(adev)) 2129 ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD); 2130 2131 return ret; 2132 } 2133 2134 static int smu_hw_fini(struct amdgpu_ip_block *ip_block) 2135 { 2136 struct amdgpu_device *adev = ip_block->adev; 2137 struct smu_context *smu = adev->powerplay.pp_handle; 2138 int i, ret; 2139 2140 if (amdgpu_sriov_multi_vf_mode(adev)) 2141 return 0; 2142 2143 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 2144 smu_dpm_set_vcn_enable(smu, false, i); 2145 adev->vcn.inst[i].cur_state = AMD_PG_STATE_GATE; 2146 } 2147 smu_dpm_set_jpeg_enable(smu, false); 2148 adev->jpeg.cur_state = AMD_PG_STATE_GATE; 2149 smu_dpm_set_umsch_mm_enable(smu, false); 2150 2151 if (!smu->pm_enabled) 2152 return 0; 2153 2154 adev->pm.dpm_enabled = false; 2155 2156 ret = smu_smc_hw_cleanup(smu); 2157 if (ret) 2158 return ret; 2159 2160 ret = smu_reset_mp1_state(smu); 2161 if (ret) 2162 return ret; 2163 2164 return 0; 2165 } 2166 2167 static void smu_late_fini(struct amdgpu_ip_block *ip_block) 2168 { 2169 struct amdgpu_device *adev = ip_block->adev; 2170 struct smu_context *smu = adev->powerplay.pp_handle; 2171 2172 kfree(smu); 2173 } 2174 2175 static int smu_reset(struct smu_context *smu) 2176 { 2177 struct amdgpu_device *adev = smu->adev; 2178 struct amdgpu_ip_block *ip_block; 2179 int ret; 2180 2181 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC); 2182 if (!ip_block) 2183 return -EINVAL; 2184 2185 ret = smu_hw_fini(ip_block); 2186 if (ret) 2187 return ret; 2188 2189 ret = smu_hw_init(ip_block); 2190 if (ret) 2191 return ret; 2192 2193 ret = smu_late_init(ip_block); 2194 if (ret) 2195 return ret; 2196 2197 return 0; 2198 } 2199 2200 static int smu_suspend(struct amdgpu_ip_block *ip_block) 2201 { 2202 struct amdgpu_device *adev = ip_block->adev; 2203 struct smu_context *smu = adev->powerplay.pp_handle; 2204 int ret; 2205 uint64_t count; 2206 2207 if (amdgpu_sriov_multi_vf_mode(adev)) 2208 return 0; 2209 2210 if (!smu->pm_enabled) 2211 return 0; 2212 2213 adev->pm.dpm_enabled = false; 2214 2215 ret = smu_smc_hw_cleanup(smu); 2216 if (ret) 2217 return ret; 2218 2219 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED); 2220 2221 
smu_set_gfx_cgpg(smu, false); 2222 2223 /* 2224 * pwfw resets entrycount when device is suspended, so we save the 2225 * last value to be used when we resume to keep it consistent 2226 */ 2227 ret = smu_get_entrycount_gfxoff(smu, &count); 2228 if (!ret) 2229 adev->gfx.gfx_off_entrycount = count; 2230 2231 /* clear this on suspend so it will get reprogrammed on resume */ 2232 smu->workload_mask = 0; 2233 2234 return 0; 2235 } 2236 2237 static int smu_resume(struct amdgpu_ip_block *ip_block) 2238 { 2239 int ret; 2240 struct amdgpu_device *adev = ip_block->adev; 2241 struct smu_context *smu = adev->powerplay.pp_handle; 2242 2243 if (amdgpu_sriov_multi_vf_mode(adev)) 2244 return 0; 2245 2246 if (!smu->pm_enabled) 2247 return 0; 2248 2249 dev_info(adev->dev, "SMU is resuming...\n"); 2250 2251 ret = smu_start_smc_engine(smu); 2252 if (ret) { 2253 dev_err(adev->dev, "SMC engine is not correctly up!\n"); 2254 return ret; 2255 } 2256 2257 ret = smu_smc_hw_setup(smu); 2258 if (ret) { 2259 dev_err(adev->dev, "Failed to setup smc hw!\n"); 2260 return ret; 2261 } 2262 2263 ret = smu_set_gfx_imu_enable(smu); 2264 if (ret) 2265 return ret; 2266 2267 smu_set_gfx_cgpg(smu, true); 2268 2269 smu->disable_uclk_switch = 0; 2270 2271 adev->pm.dpm_enabled = true; 2272 2273 dev_info(adev->dev, "SMU is resumed successfully!\n"); 2274 2275 return 0; 2276 } 2277 2278 static int smu_display_configuration_change(void *handle, 2279 const struct amd_pp_display_configuration *display_config) 2280 { 2281 struct smu_context *smu = handle; 2282 2283 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2284 return -EOPNOTSUPP; 2285 2286 if (!display_config) 2287 return -EINVAL; 2288 2289 smu_set_min_dcef_deep_sleep(smu, 2290 display_config->min_dcef_deep_sleep_set_clk / 100); 2291 2292 return 0; 2293 } 2294 2295 static int smu_set_clockgating_state(struct amdgpu_ip_block *ip_block, 2296 enum amd_clockgating_state state) 2297 { 2298 return 0; 2299 } 2300 2301 static int smu_set_powergating_state(struct amdgpu_ip_block *ip_block, 2302 enum amd_powergating_state state) 2303 { 2304 return 0; 2305 } 2306 2307 static int smu_enable_umd_pstate(void *handle, 2308 enum amd_dpm_forced_level *level) 2309 { 2310 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 2311 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 2312 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 2313 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 2314 2315 struct smu_context *smu = (struct smu_context*)(handle); 2316 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 2317 2318 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 2319 return -EINVAL; 2320 2321 if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) { 2322 /* enter umd pstate, save current level, disable gfx cg*/ 2323 if (*level & profile_mode_mask) { 2324 smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level; 2325 smu_gpo_control(smu, false); 2326 smu_gfx_ulv_control(smu, false); 2327 smu_deep_sleep_control(smu, false); 2328 amdgpu_asic_update_umd_stable_pstate(smu->adev, true); 2329 } 2330 } else { 2331 /* exit umd pstate, restore level, enable gfx cg*/ 2332 if (!(*level & profile_mode_mask)) { 2333 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) 2334 *level = smu_dpm_ctx->saved_dpm_level; 2335 amdgpu_asic_update_umd_stable_pstate(smu->adev, false); 2336 smu_deep_sleep_control(smu, true); 2337 smu_gfx_ulv_control(smu, true); 2338 smu_gpo_control(smu, true); 2339 } 2340 } 2341 2342 return 0; 2343 } 2344 2345 static int smu_bump_power_profile_mode(struct smu_context *smu, 2346 long *custom_params, 2347 u32 
custom_params_max_idx) 2348 { 2349 u32 workload_mask = 0; 2350 int i, ret = 0; 2351 2352 for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) { 2353 if (smu->workload_refcount[i]) 2354 workload_mask |= 1 << i; 2355 } 2356 2357 if (smu->workload_mask == workload_mask) 2358 return 0; 2359 2360 if (smu->ppt_funcs->set_power_profile_mode) 2361 ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask, 2362 custom_params, 2363 custom_params_max_idx); 2364 2365 if (!ret) 2366 smu->workload_mask = workload_mask; 2367 2368 return ret; 2369 } 2370 2371 static void smu_power_profile_mode_get(struct smu_context *smu, 2372 enum PP_SMC_POWER_PROFILE profile_mode) 2373 { 2374 smu->workload_refcount[profile_mode]++; 2375 } 2376 2377 static void smu_power_profile_mode_put(struct smu_context *smu, 2378 enum PP_SMC_POWER_PROFILE profile_mode) 2379 { 2380 if (smu->workload_refcount[profile_mode]) 2381 smu->workload_refcount[profile_mode]--; 2382 } 2383 2384 static int smu_adjust_power_state_dynamic(struct smu_context *smu, 2385 enum amd_dpm_forced_level level, 2386 bool skip_display_settings) 2387 { 2388 int ret = 0; 2389 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 2390 2391 if (!skip_display_settings) { 2392 ret = smu_display_config_changed(smu); 2393 if (ret) { 2394 dev_err(smu->adev->dev, "Failed to change display config!"); 2395 return ret; 2396 } 2397 } 2398 2399 ret = smu_apply_clocks_adjust_rules(smu); 2400 if (ret) { 2401 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!"); 2402 return ret; 2403 } 2404 2405 if (!skip_display_settings) { 2406 ret = smu_notify_smc_display_config(smu); 2407 if (ret) { 2408 dev_err(smu->adev->dev, "Failed to notify smc display config!"); 2409 return ret; 2410 } 2411 } 2412 2413 if (smu_dpm_ctx->dpm_level != level) { 2414 ret = smu_asic_set_performance_level(smu, level); 2415 if (ret) { 2416 if (ret == -EOPNOTSUPP) 2417 dev_info(smu->adev->dev, "set performance level %d not supported", 2418 level); 2419 else 2420 dev_err(smu->adev->dev, "Failed to set performance level %d", 2421 level); 2422 return ret; 2423 } 2424 2425 /* update the saved copy */ 2426 smu_dpm_ctx->dpm_level = level; 2427 } 2428 2429 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && 2430 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) 2431 smu_bump_power_profile_mode(smu, NULL, 0); 2432 2433 return ret; 2434 } 2435 2436 static int smu_handle_task(struct smu_context *smu, 2437 enum amd_dpm_forced_level level, 2438 enum amd_pp_task task_id) 2439 { 2440 int ret = 0; 2441 2442 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2443 return -EOPNOTSUPP; 2444 2445 switch (task_id) { 2446 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: 2447 ret = smu_pre_display_config_changed(smu); 2448 if (ret) 2449 return ret; 2450 ret = smu_adjust_power_state_dynamic(smu, level, false); 2451 break; 2452 case AMD_PP_TASK_COMPLETE_INIT: 2453 ret = smu_adjust_power_state_dynamic(smu, level, true); 2454 break; 2455 case AMD_PP_TASK_READJUST_POWER_STATE: 2456 ret = smu_adjust_power_state_dynamic(smu, level, true); 2457 break; 2458 default: 2459 break; 2460 } 2461 2462 return ret; 2463 } 2464 2465 static int smu_handle_dpm_task(void *handle, 2466 enum amd_pp_task task_id, 2467 enum amd_pm_state_type *user_state) 2468 { 2469 struct smu_context *smu = handle; 2470 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 2471 2472 return smu_handle_task(smu, smu_dpm->dpm_level, task_id); 2473 2474 } 2475 2476 static int smu_switch_power_profile(void *handle, 2477 enum PP_SMC_POWER_PROFILE type, 
2478 bool enable) 2479 { 2480 struct smu_context *smu = handle; 2481 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 2482 int ret; 2483 2484 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2485 return -EOPNOTSUPP; 2486 2487 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM)) 2488 return -EINVAL; 2489 2490 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && 2491 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { 2492 if (enable) 2493 smu_power_profile_mode_get(smu, type); 2494 else 2495 smu_power_profile_mode_put(smu, type); 2496 /* don't switch the active workload when paused */ 2497 if (smu->pause_workload) 2498 ret = 0; 2499 else 2500 ret = smu_bump_power_profile_mode(smu, NULL, 0); 2501 if (ret) { 2502 if (enable) 2503 smu_power_profile_mode_put(smu, type); 2504 else 2505 smu_power_profile_mode_get(smu, type); 2506 return ret; 2507 } 2508 } 2509 2510 return 0; 2511 } 2512 2513 static int smu_pause_power_profile(void *handle, 2514 bool pause) 2515 { 2516 struct smu_context *smu = handle; 2517 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 2518 u32 workload_mask = 1 << PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 2519 int ret; 2520 2521 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2522 return -EOPNOTSUPP; 2523 2524 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && 2525 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { 2526 smu->pause_workload = pause; 2527 2528 /* force to bootup default profile */ 2529 if (smu->pause_workload && smu->ppt_funcs->set_power_profile_mode) 2530 ret = smu->ppt_funcs->set_power_profile_mode(smu, 2531 workload_mask, 2532 NULL, 2533 0); 2534 else 2535 ret = smu_bump_power_profile_mode(smu, NULL, 0); 2536 return ret; 2537 } 2538 2539 return 0; 2540 } 2541 2542 static enum amd_dpm_forced_level smu_get_performance_level(void *handle) 2543 { 2544 struct smu_context *smu = handle; 2545 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 2546 2547 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2548 return -EOPNOTSUPP; 2549 2550 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 2551 return -EINVAL; 2552 2553 return smu_dpm_ctx->dpm_level; 2554 } 2555 2556 static int smu_force_performance_level(void *handle, 2557 enum amd_dpm_forced_level level) 2558 { 2559 struct smu_context *smu = handle; 2560 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 2561 int ret = 0; 2562 2563 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2564 return -EOPNOTSUPP; 2565 2566 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 2567 return -EINVAL; 2568 2569 ret = smu_enable_umd_pstate(smu, &level); 2570 if (ret) 2571 return ret; 2572 2573 ret = smu_handle_task(smu, level, 2574 AMD_PP_TASK_READJUST_POWER_STATE); 2575 2576 /* reset user dpm clock state */ 2577 if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { 2578 memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask)); 2579 smu->user_dpm_profile.clk_dependency = 0; 2580 } 2581 2582 return ret; 2583 } 2584 2585 static int smu_set_display_count(void *handle, uint32_t count) 2586 { 2587 struct smu_context *smu = handle; 2588 2589 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2590 return -EOPNOTSUPP; 2591 2592 return smu_init_display_count(smu, count); 2593 } 2594 2595 static int smu_force_smuclk_levels(struct smu_context *smu, 2596 enum smu_clk_type clk_type, 2597 uint32_t mask) 2598 { 2599 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 2600 int ret = 0; 2601 2602 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2603 
return -EOPNOTSUPP; 2604 2605 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { 2606 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n"); 2607 return -EINVAL; 2608 } 2609 2610 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) { 2611 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask); 2612 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 2613 smu->user_dpm_profile.clk_mask[clk_type] = mask; 2614 smu_set_user_clk_dependencies(smu, clk_type); 2615 } 2616 } 2617 2618 return ret; 2619 } 2620 2621 static int smu_force_ppclk_levels(void *handle, 2622 enum pp_clock_type type, 2623 uint32_t mask) 2624 { 2625 struct smu_context *smu = handle; 2626 enum smu_clk_type clk_type; 2627 2628 switch (type) { 2629 case PP_SCLK: 2630 clk_type = SMU_SCLK; break; 2631 case PP_MCLK: 2632 clk_type = SMU_MCLK; break; 2633 case PP_PCIE: 2634 clk_type = SMU_PCIE; break; 2635 case PP_SOCCLK: 2636 clk_type = SMU_SOCCLK; break; 2637 case PP_FCLK: 2638 clk_type = SMU_FCLK; break; 2639 case PP_DCEFCLK: 2640 clk_type = SMU_DCEFCLK; break; 2641 case PP_VCLK: 2642 clk_type = SMU_VCLK; break; 2643 case PP_VCLK1: 2644 clk_type = SMU_VCLK1; break; 2645 case PP_DCLK: 2646 clk_type = SMU_DCLK; break; 2647 case PP_DCLK1: 2648 clk_type = SMU_DCLK1; break; 2649 case OD_SCLK: 2650 clk_type = SMU_OD_SCLK; break; 2651 case OD_MCLK: 2652 clk_type = SMU_OD_MCLK; break; 2653 case OD_VDDC_CURVE: 2654 clk_type = SMU_OD_VDDC_CURVE; break; 2655 case OD_RANGE: 2656 clk_type = SMU_OD_RANGE; break; 2657 default: 2658 return -EINVAL; 2659 } 2660 2661 return smu_force_smuclk_levels(smu, clk_type, mask); 2662 } 2663 2664 /* 2665 * On system suspend or reset, the dpm_enabled flag is 2666 * cleared so that those SMU services which are not 2667 * supported are gated. 2668 * However, setting the mp1 state should still be allowed 2669 * even with dpm_enabled cleared.
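* (e.g. smu_hw_fini() clears dpm_enabled before smu_reset_mp1_state() requests PP_MP1_STATE_UNLOAD)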
2670 */ 2671 static int smu_set_mp1_state(void *handle, 2672 enum pp_mp1_state mp1_state) 2673 { 2674 struct smu_context *smu = handle; 2675 int ret = 0; 2676 2677 if (!smu->pm_enabled) 2678 return -EOPNOTSUPP; 2679 2680 if (smu->ppt_funcs && 2681 smu->ppt_funcs->set_mp1_state) 2682 ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state); 2683 2684 return ret; 2685 } 2686 2687 static int smu_set_df_cstate(void *handle, 2688 enum pp_df_cstate state) 2689 { 2690 struct smu_context *smu = handle; 2691 int ret = 0; 2692 2693 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2694 return -EOPNOTSUPP; 2695 2696 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate) 2697 return 0; 2698 2699 ret = smu->ppt_funcs->set_df_cstate(smu, state); 2700 if (ret) 2701 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n"); 2702 2703 return ret; 2704 } 2705 2706 int smu_write_watermarks_table(struct smu_context *smu) 2707 { 2708 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2709 return -EOPNOTSUPP; 2710 2711 return smu_set_watermarks_table(smu, NULL); 2712 } 2713 2714 static int smu_set_watermarks_for_clock_ranges(void *handle, 2715 struct pp_smu_wm_range_sets *clock_ranges) 2716 { 2717 struct smu_context *smu = handle; 2718 2719 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2720 return -EOPNOTSUPP; 2721 2722 if (smu->disable_watermark) 2723 return 0; 2724 2725 return smu_set_watermarks_table(smu, clock_ranges); 2726 } 2727 2728 int smu_set_ac_dc(struct smu_context *smu) 2729 { 2730 int ret = 0; 2731 2732 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2733 return -EOPNOTSUPP; 2734 2735 /* controlled by firmware */ 2736 if (smu->dc_controlled_by_gpio) 2737 return 0; 2738 2739 ret = smu_set_power_source(smu, 2740 smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC : 2741 SMU_POWER_SOURCE_DC); 2742 if (ret) 2743 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n", 2744 smu->adev->pm.ac_power ? 
"AC" : "DC"); 2745 2746 return ret; 2747 } 2748 2749 const struct amd_ip_funcs smu_ip_funcs = { 2750 .name = "smu", 2751 .early_init = smu_early_init, 2752 .late_init = smu_late_init, 2753 .sw_init = smu_sw_init, 2754 .sw_fini = smu_sw_fini, 2755 .hw_init = smu_hw_init, 2756 .hw_fini = smu_hw_fini, 2757 .late_fini = smu_late_fini, 2758 .suspend = smu_suspend, 2759 .resume = smu_resume, 2760 .is_idle = NULL, 2761 .check_soft_reset = NULL, 2762 .wait_for_idle = NULL, 2763 .soft_reset = NULL, 2764 .set_clockgating_state = smu_set_clockgating_state, 2765 .set_powergating_state = smu_set_powergating_state, 2766 }; 2767 2768 const struct amdgpu_ip_block_version smu_v11_0_ip_block = { 2769 .type = AMD_IP_BLOCK_TYPE_SMC, 2770 .major = 11, 2771 .minor = 0, 2772 .rev = 0, 2773 .funcs = &smu_ip_funcs, 2774 }; 2775 2776 const struct amdgpu_ip_block_version smu_v12_0_ip_block = { 2777 .type = AMD_IP_BLOCK_TYPE_SMC, 2778 .major = 12, 2779 .minor = 0, 2780 .rev = 0, 2781 .funcs = &smu_ip_funcs, 2782 }; 2783 2784 const struct amdgpu_ip_block_version smu_v13_0_ip_block = { 2785 .type = AMD_IP_BLOCK_TYPE_SMC, 2786 .major = 13, 2787 .minor = 0, 2788 .rev = 0, 2789 .funcs = &smu_ip_funcs, 2790 }; 2791 2792 const struct amdgpu_ip_block_version smu_v14_0_ip_block = { 2793 .type = AMD_IP_BLOCK_TYPE_SMC, 2794 .major = 14, 2795 .minor = 0, 2796 .rev = 0, 2797 .funcs = &smu_ip_funcs, 2798 }; 2799 2800 static int smu_load_microcode(void *handle) 2801 { 2802 struct smu_context *smu = handle; 2803 struct amdgpu_device *adev = smu->adev; 2804 int ret = 0; 2805 2806 if (!smu->pm_enabled) 2807 return -EOPNOTSUPP; 2808 2809 /* This should be used for non PSP loading */ 2810 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) 2811 return 0; 2812 2813 if (smu->ppt_funcs->load_microcode) { 2814 ret = smu->ppt_funcs->load_microcode(smu); 2815 if (ret) { 2816 dev_err(adev->dev, "Load microcode failed\n"); 2817 return ret; 2818 } 2819 } 2820 2821 if (smu->ppt_funcs->check_fw_status) { 2822 ret = smu->ppt_funcs->check_fw_status(smu); 2823 if (ret) { 2824 dev_err(adev->dev, "SMC is not ready\n"); 2825 return ret; 2826 } 2827 } 2828 2829 return ret; 2830 } 2831 2832 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) 2833 { 2834 int ret = 0; 2835 2836 if (smu->ppt_funcs->set_gfx_cgpg) 2837 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled); 2838 2839 return ret; 2840 } 2841 2842 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed) 2843 { 2844 struct smu_context *smu = handle; 2845 int ret = 0; 2846 2847 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2848 return -EOPNOTSUPP; 2849 2850 if (!smu->ppt_funcs->set_fan_speed_rpm) 2851 return -EOPNOTSUPP; 2852 2853 if (speed == U32_MAX) 2854 return -EINVAL; 2855 2856 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed); 2857 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 2858 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM; 2859 smu->user_dpm_profile.fan_speed_rpm = speed; 2860 2861 /* Override custom PWM setting as they cannot co-exist */ 2862 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM; 2863 smu->user_dpm_profile.fan_speed_pwm = 0; 2864 } 2865 2866 return ret; 2867 } 2868 2869 /** 2870 * smu_get_power_limit - Request one of the SMU Power Limits 2871 * 2872 * @handle: pointer to smu context 2873 * @limit: requested limit is written back to this variable 2874 * @pp_limit_level: &pp_power_limit_level which limit of the power to return 2875 * @pp_power_type: &pp_power_type type of power 2876 * Return: 0 on 
success, <0 on error 2877 * 2878 */ 2879 int smu_get_power_limit(void *handle, 2880 uint32_t *limit, 2881 enum pp_power_limit_level pp_limit_level, 2882 enum pp_power_type pp_power_type) 2883 { 2884 struct smu_context *smu = handle; 2885 struct amdgpu_device *adev = smu->adev; 2886 enum smu_ppt_limit_level limit_level; 2887 uint32_t limit_type; 2888 int ret = 0; 2889 2890 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2891 return -EOPNOTSUPP; 2892 2893 switch (pp_power_type) { 2894 case PP_PWR_TYPE_SUSTAINED: 2895 limit_type = SMU_DEFAULT_PPT_LIMIT; 2896 break; 2897 case PP_PWR_TYPE_FAST: 2898 limit_type = SMU_FAST_PPT_LIMIT; 2899 break; 2900 default: 2901 return -EOPNOTSUPP; 2902 } 2903 2904 switch (pp_limit_level) { 2905 case PP_PWR_LIMIT_CURRENT: 2906 limit_level = SMU_PPT_LIMIT_CURRENT; 2907 break; 2908 case PP_PWR_LIMIT_DEFAULT: 2909 limit_level = SMU_PPT_LIMIT_DEFAULT; 2910 break; 2911 case PP_PWR_LIMIT_MAX: 2912 limit_level = SMU_PPT_LIMIT_MAX; 2913 break; 2914 case PP_PWR_LIMIT_MIN: 2915 limit_level = SMU_PPT_LIMIT_MIN; 2916 break; 2917 default: 2918 return -EOPNOTSUPP; 2919 } 2920 2921 if (limit_type != SMU_DEFAULT_PPT_LIMIT) { 2922 if (smu->ppt_funcs->get_ppt_limit) 2923 ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level); 2924 } else { 2925 switch (limit_level) { 2926 case SMU_PPT_LIMIT_CURRENT: 2927 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 2928 case IP_VERSION(13, 0, 2): 2929 case IP_VERSION(13, 0, 6): 2930 case IP_VERSION(13, 0, 12): 2931 case IP_VERSION(13, 0, 14): 2932 case IP_VERSION(11, 0, 7): 2933 case IP_VERSION(11, 0, 11): 2934 case IP_VERSION(11, 0, 12): 2935 case IP_VERSION(11, 0, 13): 2936 ret = smu_get_asic_power_limits(smu, 2937 &smu->current_power_limit, 2938 NULL, NULL, NULL); 2939 break; 2940 default: 2941 break; 2942 } 2943 *limit = smu->current_power_limit; 2944 break; 2945 case SMU_PPT_LIMIT_DEFAULT: 2946 *limit = smu->default_power_limit; 2947 break; 2948 case SMU_PPT_LIMIT_MAX: 2949 *limit = smu->max_power_limit; 2950 break; 2951 case SMU_PPT_LIMIT_MIN: 2952 *limit = smu->min_power_limit; 2953 break; 2954 default: 2955 return -EINVAL; 2956 } 2957 } 2958 2959 return ret; 2960 } 2961 2962 static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit) 2963 { 2964 struct smu_context *smu = handle; 2965 int ret = 0; 2966 2967 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2968 return -EOPNOTSUPP; 2969 2970 if (limit_type == SMU_DEFAULT_PPT_LIMIT) { 2971 if (!limit) 2972 limit = smu->current_power_limit; 2973 if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) { 2974 dev_err(smu->adev->dev, 2975 "New power limit (%d) is out of range [%d,%d]\n", 2976 limit, smu->min_power_limit, smu->max_power_limit); 2977 return -EINVAL; 2978 } 2979 } 2980 2981 if (smu->ppt_funcs->set_power_limit) { 2982 ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit); 2983 if (ret) 2984 return ret; 2985 if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) 2986 smu->user_dpm_profile.power_limits[limit_type] = limit; 2987 } 2988 2989 return 0; 2990 } 2991 2992 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) 2993 { 2994 int ret = 0; 2995 2996 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2997 return -EOPNOTSUPP; 2998 2999 if (smu->ppt_funcs->print_clk_levels) 3000 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf); 3001 3002 return ret; 3003 } 3004 3005 static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type) 3006 { 
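/* Map the powerplay clock identifier onto the SWSMU clock enum; anything unrecognized maps to SMU_CLK_COUNT so callers can reject it with -EINVAL. */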
3007 enum smu_clk_type clk_type; 3008 3009 switch (type) { 3010 case PP_SCLK: 3011 clk_type = SMU_SCLK; break; 3012 case PP_MCLK: 3013 clk_type = SMU_MCLK; break; 3014 case PP_PCIE: 3015 clk_type = SMU_PCIE; break; 3016 case PP_SOCCLK: 3017 clk_type = SMU_SOCCLK; break; 3018 case PP_FCLK: 3019 clk_type = SMU_FCLK; break; 3020 case PP_DCEFCLK: 3021 clk_type = SMU_DCEFCLK; break; 3022 case PP_VCLK: 3023 clk_type = SMU_VCLK; break; 3024 case PP_VCLK1: 3025 clk_type = SMU_VCLK1; break; 3026 case PP_DCLK: 3027 clk_type = SMU_DCLK; break; 3028 case PP_DCLK1: 3029 clk_type = SMU_DCLK1; break; 3030 case PP_ISPICLK: 3031 clk_type = SMU_ISPICLK; 3032 break; 3033 case PP_ISPXCLK: 3034 clk_type = SMU_ISPXCLK; 3035 break; 3036 case OD_SCLK: 3037 clk_type = SMU_OD_SCLK; break; 3038 case OD_MCLK: 3039 clk_type = SMU_OD_MCLK; break; 3040 case OD_VDDC_CURVE: 3041 clk_type = SMU_OD_VDDC_CURVE; break; 3042 case OD_RANGE: 3043 clk_type = SMU_OD_RANGE; break; 3044 case OD_VDDGFX_OFFSET: 3045 clk_type = SMU_OD_VDDGFX_OFFSET; break; 3046 case OD_CCLK: 3047 clk_type = SMU_OD_CCLK; break; 3048 case OD_FAN_CURVE: 3049 clk_type = SMU_OD_FAN_CURVE; break; 3050 case OD_ACOUSTIC_LIMIT: 3051 clk_type = SMU_OD_ACOUSTIC_LIMIT; break; 3052 case OD_ACOUSTIC_TARGET: 3053 clk_type = SMU_OD_ACOUSTIC_TARGET; break; 3054 case OD_FAN_TARGET_TEMPERATURE: 3055 clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break; 3056 case OD_FAN_MINIMUM_PWM: 3057 clk_type = SMU_OD_FAN_MINIMUM_PWM; break; 3058 case OD_FAN_ZERO_RPM_ENABLE: 3059 clk_type = SMU_OD_FAN_ZERO_RPM_ENABLE; break; 3060 case OD_FAN_ZERO_RPM_STOP_TEMP: 3061 clk_type = SMU_OD_FAN_ZERO_RPM_STOP_TEMP; break; 3062 default: 3063 clk_type = SMU_CLK_COUNT; break; 3064 } 3065 3066 return clk_type; 3067 } 3068 3069 static int smu_print_ppclk_levels(void *handle, 3070 enum pp_clock_type type, 3071 char *buf) 3072 { 3073 struct smu_context *smu = handle; 3074 enum smu_clk_type clk_type; 3075 3076 clk_type = smu_convert_to_smuclk(type); 3077 if (clk_type == SMU_CLK_COUNT) 3078 return -EINVAL; 3079 3080 return smu_print_smuclk_levels(smu, clk_type, buf); 3081 } 3082 3083 static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset) 3084 { 3085 struct smu_context *smu = handle; 3086 enum smu_clk_type clk_type; 3087 3088 clk_type = smu_convert_to_smuclk(type); 3089 if (clk_type == SMU_CLK_COUNT) 3090 return -EINVAL; 3091 3092 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3093 return -EOPNOTSUPP; 3094 3095 if (!smu->ppt_funcs->emit_clk_levels) 3096 return -ENOENT; 3097 3098 return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset); 3099 3100 } 3101 3102 static int smu_od_edit_dpm_table(void *handle, 3103 enum PP_OD_DPM_TABLE_COMMAND type, 3104 long *input, uint32_t size) 3105 { 3106 struct smu_context *smu = handle; 3107 int ret = 0; 3108 3109 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3110 return -EOPNOTSUPP; 3111 3112 if (smu->ppt_funcs->od_edit_dpm_table) { 3113 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size); 3114 } 3115 3116 return ret; 3117 } 3118 3119 static int smu_read_sensor(void *handle, 3120 int sensor, 3121 void *data, 3122 int *size_arg) 3123 { 3124 struct smu_context *smu = handle; 3125 struct amdgpu_device *adev = smu->adev; 3126 struct smu_umd_pstate_table *pstate_table = 3127 &smu->pstate_table; 3128 int i, ret = 0; 3129 uint32_t *size, size_val; 3130 3131 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3132 return -EOPNOTSUPP; 3133 3134 if (!data || !size_arg) 3135 return -EINVAL; 3136 3137 size_val = 
*size_arg; 3138 size = &size_val; 3139 3140 if (smu->ppt_funcs->read_sensor) 3141 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size)) 3142 goto unlock; 3143 3144 switch (sensor) { 3145 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK: 3146 *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100; 3147 *size = 4; 3148 break; 3149 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK: 3150 *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100; 3151 *size = 4; 3152 break; 3153 case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK: 3154 *((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100; 3155 *size = 4; 3156 break; 3157 case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK: 3158 *((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100; 3159 *size = 4; 3160 break; 3161 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: 3162 ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data); 3163 *size = 8; 3164 break; 3165 case AMDGPU_PP_SENSOR_UVD_POWER: 3166 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0; 3167 *size = 4; 3168 break; 3169 case AMDGPU_PP_SENSOR_VCE_POWER: 3170 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0; 3171 *size = 4; 3172 break; 3173 case AMDGPU_PP_SENSOR_VCN_POWER_STATE: 3174 *(uint32_t *)data = 0; 3175 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 3176 if (!atomic_read(&smu->smu_power.power_gate.vcn_gated[i])) { 3177 *(uint32_t *)data = 1; 3178 break; 3179 } 3180 } 3181 *size = 4; 3182 break; 3183 case AMDGPU_PP_SENSOR_MIN_FAN_RPM: 3184 *(uint32_t *)data = 0; 3185 *size = 4; 3186 break; 3187 default: 3188 *size = 0; 3189 ret = -EOPNOTSUPP; 3190 break; 3191 } 3192 3193 unlock: 3194 // assign uint32_t to int 3195 *size_arg = size_val; 3196 3197 return ret; 3198 } 3199 3200 static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit) 3201 { 3202 int ret = -EOPNOTSUPP; 3203 struct smu_context *smu = handle; 3204 3205 if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit) 3206 ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit); 3207 3208 return ret; 3209 } 3210 3211 static int smu_set_apu_thermal_limit(void *handle, uint32_t limit) 3212 { 3213 int ret = -EOPNOTSUPP; 3214 struct smu_context *smu = handle; 3215 3216 if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit) 3217 ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit); 3218 3219 return ret; 3220 } 3221 3222 static int smu_get_power_profile_mode(void *handle, char *buf) 3223 { 3224 struct smu_context *smu = handle; 3225 3226 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || 3227 !smu->ppt_funcs->get_power_profile_mode) 3228 return -EOPNOTSUPP; 3229 if (!buf) 3230 return -EINVAL; 3231 3232 return smu->ppt_funcs->get_power_profile_mode(smu, buf); 3233 } 3234 3235 static int smu_set_power_profile_mode(void *handle, 3236 long *param, 3237 uint32_t param_size) 3238 { 3239 struct smu_context *smu = handle; 3240 bool custom = false; 3241 int ret = 0; 3242 3243 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || 3244 !smu->ppt_funcs->set_power_profile_mode) 3245 return -EOPNOTSUPP; 3246 3247 if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) { 3248 custom = true; 3249 /* clear frontend mask so custom changes propogate */ 3250 smu->workload_mask = 0; 3251 } 3252 3253 if ((param[param_size] != smu->power_profile_mode) || custom) { 3254 /* clear the old user preference */ 3255 smu_power_profile_mode_put(smu, smu->power_profile_mode); 3256 /* set the new user preference */ 3257 smu_power_profile_mode_get(smu, param[param_size]); 3258 ret = 
smu_bump_power_profile_mode(smu, 3259 custom ? param : NULL, 3260 custom ? param_size : 0); 3261 if (ret) 3262 smu_power_profile_mode_put(smu, param[param_size]); 3263 else 3264 /* store the user's preference */ 3265 smu->power_profile_mode = param[param_size]; 3266 } 3267 3268 return ret; 3269 } 3270 3271 static int smu_get_fan_control_mode(void *handle, u32 *fan_mode) 3272 { 3273 struct smu_context *smu = handle; 3274 3275 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3276 return -EOPNOTSUPP; 3277 3278 if (!smu->ppt_funcs->get_fan_control_mode) 3279 return -EOPNOTSUPP; 3280 3281 if (!fan_mode) 3282 return -EINVAL; 3283 3284 *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu); 3285 3286 return 0; 3287 } 3288 3289 static int smu_set_fan_control_mode(void *handle, u32 value) 3290 { 3291 struct smu_context *smu = handle; 3292 int ret = 0; 3293 3294 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3295 return -EOPNOTSUPP; 3296 3297 if (!smu->ppt_funcs->set_fan_control_mode) 3298 return -EOPNOTSUPP; 3299 3300 if (value == U32_MAX) 3301 return -EINVAL; 3302 3303 ret = smu->ppt_funcs->set_fan_control_mode(smu, value); 3304 if (ret) 3305 goto out; 3306 3307 if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 3308 smu->user_dpm_profile.fan_mode = value; 3309 3310 /* reset user dpm fan speed */ 3311 if (value != AMD_FAN_CTRL_MANUAL) { 3312 smu->user_dpm_profile.fan_speed_pwm = 0; 3313 smu->user_dpm_profile.fan_speed_rpm = 0; 3314 smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM); 3315 } 3316 } 3317 3318 out: 3319 return ret; 3320 } 3321 3322 static int smu_get_fan_speed_pwm(void *handle, u32 *speed) 3323 { 3324 struct smu_context *smu = handle; 3325 int ret = 0; 3326 3327 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3328 return -EOPNOTSUPP; 3329 3330 if (!smu->ppt_funcs->get_fan_speed_pwm) 3331 return -EOPNOTSUPP; 3332 3333 if (!speed) 3334 return -EINVAL; 3335 3336 ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed); 3337 3338 return ret; 3339 } 3340 3341 static int smu_set_fan_speed_pwm(void *handle, u32 speed) 3342 { 3343 struct smu_context *smu = handle; 3344 int ret = 0; 3345 3346 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3347 return -EOPNOTSUPP; 3348 3349 if (!smu->ppt_funcs->set_fan_speed_pwm) 3350 return -EOPNOTSUPP; 3351 3352 if (speed == U32_MAX) 3353 return -EINVAL; 3354 3355 ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed); 3356 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 3357 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM; 3358 smu->user_dpm_profile.fan_speed_pwm = speed; 3359 3360 /* Override custom RPM setting as they cannot co-exist */ 3361 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM; 3362 smu->user_dpm_profile.fan_speed_rpm = 0; 3363 } 3364 3365 return ret; 3366 } 3367 3368 static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed) 3369 { 3370 struct smu_context *smu = handle; 3371 int ret = 0; 3372 3373 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3374 return -EOPNOTSUPP; 3375 3376 if (!smu->ppt_funcs->get_fan_speed_rpm) 3377 return -EOPNOTSUPP; 3378 3379 if (!speed) 3380 return -EINVAL; 3381 3382 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed); 3383 3384 return ret; 3385 } 3386 3387 static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk) 3388 { 3389 struct smu_context *smu = handle; 3390 3391 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3392 return -EOPNOTSUPP; 3393 3394 return smu_set_min_dcef_deep_sleep(smu, 
clk); 3395 } 3396 3397 static int smu_get_clock_by_type_with_latency(void *handle, 3398 enum amd_pp_clock_type type, 3399 struct pp_clock_levels_with_latency *clocks) 3400 { 3401 struct smu_context *smu = handle; 3402 enum smu_clk_type clk_type; 3403 int ret = 0; 3404 3405 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3406 return -EOPNOTSUPP; 3407 3408 if (smu->ppt_funcs->get_clock_by_type_with_latency) { 3409 switch (type) { 3410 case amd_pp_sys_clock: 3411 clk_type = SMU_GFXCLK; 3412 break; 3413 case amd_pp_mem_clock: 3414 clk_type = SMU_MCLK; 3415 break; 3416 case amd_pp_dcef_clock: 3417 clk_type = SMU_DCEFCLK; 3418 break; 3419 case amd_pp_disp_clock: 3420 clk_type = SMU_DISPCLK; 3421 break; 3422 default: 3423 dev_err(smu->adev->dev, "Invalid clock type!\n"); 3424 return -EINVAL; 3425 } 3426 3427 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks); 3428 } 3429 3430 return ret; 3431 } 3432 3433 static int smu_display_clock_voltage_request(void *handle, 3434 struct pp_display_clock_request *clock_req) 3435 { 3436 struct smu_context *smu = handle; 3437 int ret = 0; 3438 3439 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3440 return -EOPNOTSUPP; 3441 3442 if (smu->ppt_funcs->display_clock_voltage_request) 3443 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req); 3444 3445 return ret; 3446 } 3447 3448 3449 static int smu_display_disable_memory_clock_switch(void *handle, 3450 bool disable_memory_clock_switch) 3451 { 3452 struct smu_context *smu = handle; 3453 int ret = -EINVAL; 3454 3455 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3456 return -EOPNOTSUPP; 3457 3458 if (smu->ppt_funcs->display_disable_memory_clock_switch) 3459 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch); 3460 3461 return ret; 3462 } 3463 3464 static int smu_set_xgmi_pstate(void *handle, 3465 uint32_t pstate) 3466 { 3467 struct smu_context *smu = handle; 3468 int ret = 0; 3469 3470 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3471 return -EOPNOTSUPP; 3472 3473 if (smu->ppt_funcs->set_xgmi_pstate) 3474 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate); 3475 3476 if (ret) 3477 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n"); 3478 3479 return ret; 3480 } 3481 3482 static int smu_get_baco_capability(void *handle) 3483 { 3484 struct smu_context *smu = handle; 3485 3486 if (!smu->pm_enabled) 3487 return false; 3488 3489 if (!smu->ppt_funcs || !smu->ppt_funcs->get_bamaco_support) 3490 return false; 3491 3492 return smu->ppt_funcs->get_bamaco_support(smu); 3493 } 3494 3495 static int smu_baco_set_state(void *handle, int state) 3496 { 3497 struct smu_context *smu = handle; 3498 int ret = 0; 3499 3500 if (!smu->pm_enabled) 3501 return -EOPNOTSUPP; 3502 3503 if (state == 0) { 3504 if (smu->ppt_funcs->baco_exit) 3505 ret = smu->ppt_funcs->baco_exit(smu); 3506 } else if (state == 1) { 3507 if (smu->ppt_funcs->baco_enter) 3508 ret = smu->ppt_funcs->baco_enter(smu); 3509 } else { 3510 return -EINVAL; 3511 } 3512 3513 if (ret) 3514 dev_err(smu->adev->dev, "Failed to %s BACO state!\n", 3515 (state)?"enter":"exit"); 3516 3517 return ret; 3518 } 3519 3520 bool smu_mode1_reset_is_support(struct smu_context *smu) 3521 { 3522 bool ret = false; 3523 3524 if (!smu->pm_enabled) 3525 return false; 3526 3527 if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support) 3528 ret = smu->ppt_funcs->mode1_reset_is_support(smu); 3529 3530 return ret; 3531 } 3532 3533 bool smu_link_reset_is_support(struct smu_context *smu) 3534 { 3535 if 
(!smu->pm_enabled) 3536 return false; 3537 3538 return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__LINK_RESET); 3539 } 3540 3541 int smu_mode1_reset(struct smu_context *smu) 3542 { 3543 int ret = 0; 3544 3545 if (!smu->pm_enabled) 3546 return -EOPNOTSUPP; 3547 3548 if (smu->ppt_funcs->mode1_reset) 3549 ret = smu->ppt_funcs->mode1_reset(smu); 3550 3551 return ret; 3552 } 3553 3554 static int smu_mode2_reset(void *handle) 3555 { 3556 struct smu_context *smu = handle; 3557 int ret = 0; 3558 3559 if (!smu->pm_enabled) 3560 return -EOPNOTSUPP; 3561 3562 if (smu->ppt_funcs->mode2_reset) 3563 ret = smu->ppt_funcs->mode2_reset(smu); 3564 3565 if (ret) 3566 dev_err(smu->adev->dev, "Mode2 reset failed!\n"); 3567 3568 return ret; 3569 } 3570 3571 int smu_link_reset(struct smu_context *smu) 3572 { 3573 int ret = 0; 3574 3575 if (!smu->pm_enabled) 3576 return -EOPNOTSUPP; 3577 3578 if (smu->ppt_funcs->link_reset) 3579 ret = smu->ppt_funcs->link_reset(smu); 3580 3581 return ret; 3582 } 3583 3584 static int smu_enable_gfx_features(void *handle) 3585 { 3586 struct smu_context *smu = handle; 3587 int ret = 0; 3588 3589 if (!smu->pm_enabled) 3590 return -EOPNOTSUPP; 3591 3592 if (smu->ppt_funcs->enable_gfx_features) 3593 ret = smu->ppt_funcs->enable_gfx_features(smu); 3594 3595 if (ret) 3596 dev_err(smu->adev->dev, "enable gfx features failed!\n"); 3597 3598 return ret; 3599 } 3600 3601 static int smu_get_max_sustainable_clocks_by_dc(void *handle, 3602 struct pp_smu_nv_clock_table *max_clocks) 3603 { 3604 struct smu_context *smu = handle; 3605 int ret = 0; 3606 3607 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3608 return -EOPNOTSUPP; 3609 3610 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc) 3611 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks); 3612 3613 return ret; 3614 } 3615 3616 static int smu_get_uclk_dpm_states(void *handle, 3617 unsigned int *clock_values_in_khz, 3618 unsigned int *num_states) 3619 { 3620 struct smu_context *smu = handle; 3621 int ret = 0; 3622 3623 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3624 return -EOPNOTSUPP; 3625 3626 if (smu->ppt_funcs->get_uclk_dpm_states) 3627 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states); 3628 3629 return ret; 3630 } 3631 3632 static enum amd_pm_state_type smu_get_current_power_state(void *handle) 3633 { 3634 struct smu_context *smu = handle; 3635 enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT; 3636 3637 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3638 return -EOPNOTSUPP; 3639 3640 if (smu->ppt_funcs->get_current_power_state) 3641 pm_state = smu->ppt_funcs->get_current_power_state(smu); 3642 3643 return pm_state; 3644 } 3645 3646 static int smu_get_dpm_clock_table(void *handle, 3647 struct dpm_clocks *clock_table) 3648 { 3649 struct smu_context *smu = handle; 3650 int ret = 0; 3651 3652 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3653 return -EOPNOTSUPP; 3654 3655 if (smu->ppt_funcs->get_dpm_clock_table) 3656 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table); 3657 3658 return ret; 3659 } 3660 3661 static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table) 3662 { 3663 struct smu_context *smu = handle; 3664 3665 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3666 return -EOPNOTSUPP; 3667 3668 if (!smu->ppt_funcs->get_gpu_metrics) 3669 return -EOPNOTSUPP; 3670 3671 return smu->ppt_funcs->get_gpu_metrics(smu, table); 3672 } 3673 3674 static ssize_t smu_sys_get_pm_metrics(void *handle, void *pm_metrics, 3675 size_t size) 3676 { 
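/* Returns the number of bytes copied into the caller-provided buffer, or a negative errno when PM metrics are not supported. */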
3677 struct smu_context *smu = handle; 3678 3679 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3680 return -EOPNOTSUPP; 3681 3682 if (!smu->ppt_funcs->get_pm_metrics) 3683 return -EOPNOTSUPP; 3684 3685 return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size); 3686 } 3687 3688 static int smu_enable_mgpu_fan_boost(void *handle) 3689 { 3690 struct smu_context *smu = handle; 3691 int ret = 0; 3692 3693 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3694 return -EOPNOTSUPP; 3695 3696 if (smu->ppt_funcs->enable_mgpu_fan_boost) 3697 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu); 3698 3699 return ret; 3700 } 3701 3702 static int smu_gfx_state_change_set(void *handle, 3703 uint32_t state) 3704 { 3705 struct smu_context *smu = handle; 3706 int ret = 0; 3707 3708 if (smu->ppt_funcs->gfx_state_change_set) 3709 ret = smu->ppt_funcs->gfx_state_change_set(smu, state); 3710 3711 return ret; 3712 } 3713 3714 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable) 3715 { 3716 int ret = 0; 3717 3718 if (smu->ppt_funcs->smu_handle_passthrough_sbr) 3719 ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable); 3720 3721 return ret; 3722 } 3723 3724 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc) 3725 { 3726 int ret = -EOPNOTSUPP; 3727 3728 if (smu->ppt_funcs && 3729 smu->ppt_funcs->get_ecc_info) 3730 ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc); 3731 3732 return ret; 3733 3734 } 3735 3736 static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size) 3737 { 3738 struct smu_context *smu = handle; 3739 struct smu_table_context *smu_table = &smu->smu_table; 3740 struct smu_table *memory_pool = &smu_table->memory_pool; 3741 3742 if (!addr || !size) 3743 return -EINVAL; 3744 3745 *addr = NULL; 3746 *size = 0; 3747 if (memory_pool->bo) { 3748 *addr = memory_pool->cpu_addr; 3749 *size = memory_pool->size; 3750 } 3751 3752 return 0; 3753 } 3754 3755 static void smu_print_dpm_policy(struct smu_dpm_policy *policy, char *sysbuf, 3756 size_t *size) 3757 { 3758 size_t offset = *size; 3759 int level; 3760 3761 for_each_set_bit(level, &policy->level_mask, PP_POLICY_MAX_LEVELS) { 3762 if (level == policy->current_level) 3763 offset += sysfs_emit_at(sysbuf, offset, 3764 "%d : %s*\n", level, 3765 policy->desc->get_desc(policy, level)); 3766 else 3767 offset += sysfs_emit_at(sysbuf, offset, 3768 "%d : %s\n", level, 3769 policy->desc->get_desc(policy, level)); 3770 } 3771 3772 *size = offset; 3773 } 3774 3775 ssize_t smu_get_pm_policy_info(struct smu_context *smu, 3776 enum pp_pm_policy p_type, char *sysbuf) 3777 { 3778 struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm; 3779 struct smu_dpm_policy_ctxt *policy_ctxt; 3780 struct smu_dpm_policy *dpm_policy; 3781 size_t offset = 0; 3782 3783 policy_ctxt = dpm_ctxt->dpm_policies; 3784 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt || 3785 !policy_ctxt->policy_mask) 3786 return -EOPNOTSUPP; 3787 3788 if (p_type == PP_PM_POLICY_NONE) 3789 return -EINVAL; 3790 3791 dpm_policy = smu_get_pm_policy(smu, p_type); 3792 if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->desc) 3793 return -ENOENT; 3794 3795 if (!sysbuf) 3796 return -EINVAL; 3797 3798 smu_print_dpm_policy(dpm_policy, sysbuf, &offset); 3799 3800 return offset; 3801 } 3802 3803 struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu, 3804 enum pp_pm_policy p_type) 3805 { 3806 struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm; 3807 struct smu_dpm_policy_ctxt *policy_ctxt; 3808 int i; 3809 3810 policy_ctxt = 
dpm_ctxt->dpm_policies; 3811 if (!policy_ctxt) 3812 return NULL; 3813 3814 for (i = 0; i < hweight32(policy_ctxt->policy_mask); ++i) { 3815 if (policy_ctxt->policies[i].policy_type == p_type) 3816 return &policy_ctxt->policies[i]; 3817 } 3818 3819 return NULL; 3820 } 3821 3822 int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type, 3823 int level) 3824 { 3825 struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm; 3826 struct smu_dpm_policy *dpm_policy = NULL; 3827 struct smu_dpm_policy_ctxt *policy_ctxt; 3828 int ret = -EOPNOTSUPP; 3829 3830 policy_ctxt = dpm_ctxt->dpm_policies; 3831 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt || 3832 !policy_ctxt->policy_mask) 3833 return ret; 3834 3835 if (level < 0 || level >= PP_POLICY_MAX_LEVELS) 3836 return -EINVAL; 3837 3838 dpm_policy = smu_get_pm_policy(smu, p_type); 3839 3840 if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->set_policy) 3841 return ret; 3842 3843 if (dpm_policy->current_level == level) 3844 return 0; 3845 3846 ret = dpm_policy->set_policy(smu, level); 3847 3848 if (!ret) 3849 dpm_policy->current_level = level; 3850 3851 return ret; 3852 } 3853 3854 static ssize_t smu_sys_get_temp_metrics(void *handle, enum smu_temp_metric_type type, void *table) 3855 { 3856 struct smu_context *smu = handle; 3857 struct smu_table_context *smu_table = &smu->smu_table; 3858 struct smu_table *tables = smu_table->tables; 3859 enum smu_table_id table_id; 3860 3861 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3862 return -EOPNOTSUPP; 3863 3864 if (!smu->smu_temp.temp_funcs || !smu->smu_temp.temp_funcs->get_temp_metrics) 3865 return -EOPNOTSUPP; 3866 3867 table_id = smu_metrics_get_temp_table_id(type); 3868 3869 if (table_id == SMU_TABLE_COUNT) 3870 return -EINVAL; 3871 3872 /* If the request is to get size alone, return the cached table size */ 3873 if (!table && tables[table_id].cache.size) 3874 return tables[table_id].cache.size; 3875 3876 if (smu_table_cache_is_valid(&tables[table_id])) { 3877 memcpy(table, tables[table_id].cache.buffer, 3878 tables[table_id].cache.size); 3879 return tables[table_id].cache.size; 3880 } 3881 3882 return smu->smu_temp.temp_funcs->get_temp_metrics(smu, type, table); 3883 } 3884 3885 static bool smu_temp_metrics_is_supported(void *handle, enum smu_temp_metric_type type) 3886 { 3887 struct smu_context *smu = handle; 3888 bool ret = false; 3889 3890 if (!smu->pm_enabled) 3891 return false; 3892 3893 if (smu->smu_temp.temp_funcs && smu->smu_temp.temp_funcs->temp_metrics_is_supported) 3894 ret = smu->smu_temp.temp_funcs->temp_metrics_is_supported(smu, type); 3895 3896 return ret; 3897 } 3898 3899 static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table) 3900 { 3901 struct smu_context *smu = handle; 3902 3903 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3904 return -EOPNOTSUPP; 3905 3906 if (!smu->adev->xcp_mgr || !smu->ppt_funcs->get_xcp_metrics) 3907 return -EOPNOTSUPP; 3908 3909 return smu->ppt_funcs->get_xcp_metrics(smu, xcp_id, table); 3910 } 3911 3912 static const struct amd_pm_funcs swsmu_pm_funcs = { 3913 /* export for sysfs */ 3914 .set_fan_control_mode = smu_set_fan_control_mode, 3915 .get_fan_control_mode = smu_get_fan_control_mode, 3916 .set_fan_speed_pwm = smu_set_fan_speed_pwm, 3917 .get_fan_speed_pwm = smu_get_fan_speed_pwm, 3918 .force_clock_level = smu_force_ppclk_levels, 3919 .print_clock_levels = smu_print_ppclk_levels, 3920 .emit_clock_levels = smu_emit_ppclk_levels, 3921 .force_performance_level = smu_force_performance_level, 
3922 .read_sensor = smu_read_sensor, 3923 .get_apu_thermal_limit = smu_get_apu_thermal_limit, 3924 .set_apu_thermal_limit = smu_set_apu_thermal_limit, 3925 .get_performance_level = smu_get_performance_level, 3926 .get_current_power_state = smu_get_current_power_state, 3927 .get_fan_speed_rpm = smu_get_fan_speed_rpm, 3928 .set_fan_speed_rpm = smu_set_fan_speed_rpm, 3929 .get_pp_num_states = smu_get_power_num_states, 3930 .get_pp_table = smu_sys_get_pp_table, 3931 .set_pp_table = smu_sys_set_pp_table, 3932 .switch_power_profile = smu_switch_power_profile, 3933 .pause_power_profile = smu_pause_power_profile, 3934 /* export to amdgpu */ 3935 .dispatch_tasks = smu_handle_dpm_task, 3936 .load_firmware = smu_load_microcode, 3937 .set_powergating_by_smu = smu_dpm_set_power_gate, 3938 .set_power_limit = smu_set_power_limit, 3939 .get_power_limit = smu_get_power_limit, 3940 .get_power_profile_mode = smu_get_power_profile_mode, 3941 .set_power_profile_mode = smu_set_power_profile_mode, 3942 .odn_edit_dpm_table = smu_od_edit_dpm_table, 3943 .set_mp1_state = smu_set_mp1_state, 3944 .gfx_state_change_set = smu_gfx_state_change_set, 3945 /* export to DC */ 3946 .get_sclk = smu_get_sclk, 3947 .get_mclk = smu_get_mclk, 3948 .display_configuration_change = smu_display_configuration_change, 3949 .get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency, 3950 .display_clock_voltage_request = smu_display_clock_voltage_request, 3951 .enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost, 3952 .set_active_display_count = smu_set_display_count, 3953 .set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk, 3954 .get_asic_baco_capability = smu_get_baco_capability, 3955 .set_asic_baco_state = smu_baco_set_state, 3956 .get_ppfeature_status = smu_sys_get_pp_feature_mask, 3957 .set_ppfeature_status = smu_sys_set_pp_feature_mask, 3958 .asic_reset_mode_2 = smu_mode2_reset, 3959 .asic_reset_enable_gfx_features = smu_enable_gfx_features, 3960 .set_df_cstate = smu_set_df_cstate, 3961 .set_xgmi_pstate = smu_set_xgmi_pstate, 3962 .get_gpu_metrics = smu_sys_get_gpu_metrics, 3963 .get_pm_metrics = smu_sys_get_pm_metrics, 3964 .set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges, 3965 .display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch, 3966 .get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc, 3967 .get_uclk_dpm_states = smu_get_uclk_dpm_states, 3968 .get_dpm_clock_table = smu_get_dpm_clock_table, 3969 .get_smu_prv_buf_details = smu_get_prv_buffer_details, 3970 .get_xcp_metrics = smu_sys_get_xcp_metrics, 3971 .get_temp_metrics = smu_sys_get_temp_metrics, 3972 .temp_metrics_is_supported = smu_temp_metrics_is_supported, 3973 }; 3974 3975 int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event, 3976 uint64_t event_arg) 3977 { 3978 int ret = -EINVAL; 3979 3980 if (smu->ppt_funcs->wait_for_event) 3981 ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg); 3982 3983 return ret; 3984 } 3985 3986 int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size) 3987 { 3988 3989 if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled) 3990 return -EOPNOTSUPP; 3991 3992 /* Confirm the allocated buffer is of the correct size */ 3993 if (size != smu->stb_context.stb_buf_size) 3994 return -EINVAL; 3995 3996 /* 3997 * No need to take the smu mutex as we access the STB directly through MMIO 3998 * and not through the SMU messaging route (for now at least). 3999 * For register access, rely on the implementation's internal locking.
4000 */ 4001 return smu->ppt_funcs->stb_collect_info(smu, buf, size); 4002 } 4003 4004 #if defined(CONFIG_DEBUG_FS) 4005 4006 static int smu_stb_debugfs_open(struct inode *inode, struct file *filp) 4007 { 4008 struct amdgpu_device *adev = filp->f_inode->i_private; 4009 struct smu_context *smu = adev->powerplay.pp_handle; 4010 unsigned char *buf; 4011 int r; 4012 4013 buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL); 4014 if (!buf) 4015 return -ENOMEM; 4016 4017 r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size); 4018 if (r) 4019 goto out; 4020 4021 filp->private_data = buf; 4022 4023 return 0; 4024 4025 out: 4026 kvfree(buf); 4027 return r; 4028 } 4029 4030 static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size, 4031 loff_t *pos) 4032 { 4033 struct amdgpu_device *adev = filp->f_inode->i_private; 4034 struct smu_context *smu = adev->powerplay.pp_handle; 4035 4036 4037 if (!filp->private_data) 4038 return -EINVAL; 4039 4040 return simple_read_from_buffer(buf, 4041 size, 4042 pos, filp->private_data, 4043 smu->stb_context.stb_buf_size); 4044 } 4045 4046 static int smu_stb_debugfs_release(struct inode *inode, struct file *filp) 4047 { 4048 kvfree(filp->private_data); 4049 filp->private_data = NULL; 4050 4051 return 0; 4052 } 4053 4054 /* 4055 * We have to define not only read method but also 4056 * open and release because .read takes up to PAGE_SIZE 4057 * data each time so and so is invoked multiple times. 4058 * We allocate the STB buffer in .open and release it 4059 * in .release 4060 */ 4061 static const struct file_operations smu_stb_debugfs_fops = { 4062 .owner = THIS_MODULE, 4063 .open = smu_stb_debugfs_open, 4064 .read = smu_stb_debugfs_read, 4065 .release = smu_stb_debugfs_release, 4066 .llseek = default_llseek, 4067 }; 4068 4069 #endif 4070 4071 void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev) 4072 { 4073 #if defined(CONFIG_DEBUG_FS) 4074 4075 struct smu_context *smu = adev->powerplay.pp_handle; 4076 4077 if (!smu || (!smu->stb_context.stb_buf_size)) 4078 return; 4079 4080 debugfs_create_file_size("amdgpu_smu_stb_dump", 4081 S_IRUSR, 4082 adev_to_drm(adev)->primary->debugfs_root, 4083 adev, 4084 &smu_stb_debugfs_fops, 4085 smu->stb_context.stb_buf_size); 4086 #endif 4087 } 4088 4089 int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size) 4090 { 4091 int ret = 0; 4092 4093 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num) 4094 ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size); 4095 4096 return ret; 4097 } 4098 4099 int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size) 4100 { 4101 int ret = 0; 4102 4103 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag) 4104 ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size); 4105 4106 return ret; 4107 } 4108 4109 int smu_send_rma_reason(struct smu_context *smu) 4110 { 4111 int ret = 0; 4112 4113 if (smu->ppt_funcs && smu->ppt_funcs->send_rma_reason) 4114 ret = smu->ppt_funcs->send_rma_reason(smu); 4115 4116 return ret; 4117 } 4118 4119 /** 4120 * smu_reset_sdma_is_supported - Check if SDMA reset is supported by SMU 4121 * @smu: smu_context pointer 4122 * 4123 * This function checks if the SMU supports resetting the SDMA engine. 4124 * It returns true if supported, false otherwise. 
4125 */ 4126 bool smu_reset_sdma_is_supported(struct smu_context *smu) 4127 { 4128 return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__SDMA_RESET); 4129 } 4130 4131 int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask) 4132 { 4133 int ret = 0; 4134 4135 if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma) 4136 ret = smu->ppt_funcs->reset_sdma(smu, inst_mask); 4137 4138 return ret; 4139 } 4140 4141 bool smu_reset_vcn_is_supported(struct smu_context *smu) 4142 { 4143 return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__VCN_RESET); 4144 } 4145 4146 int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask) 4147 { 4148 if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn) 4149 smu->ppt_funcs->dpm_reset_vcn(smu, inst_mask); 4150 4151 return 0; 4152 } 4153