/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/power_supply.h>
#include <linux/reboot.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "vangogh_ppt.h"
#include "aldebaran_ppt.h"
#include "yellow_carp_ppt.h"
#include "cyan_skillfish_ppt.h"
#include "smu_v13_0_0_ppt.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_5_ppt.h"
#include "smu_v13_0_6_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "smu_v14_0_0_ppt.h"
#include "smu_v14_0_2_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

static const struct amd_pm_funcs swsmu_pm_funcs;
static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id);
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(void *handle, u32 value);
static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
static void smu_power_profile_mode_get(struct smu_context *smu,
				       enum PP_SMC_POWER_PROFILE profile_mode);
static void smu_power_profile_mode_put(struct smu_context *smu,
				       enum PP_SMC_POWER_PROFILE profile_mode);
static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type);
static int smu_od_edit_dpm_table(void *handle,
				 enum PP_OD_DPM_TABLE_COMMAND type,
				 long *input, uint32_t size);

static int smu_sys_get_pp_feature_mask(void *handle,
				       char *buf)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_get_pp_feature_mask(smu, buf);
}

static int smu_sys_set_pp_feature_mask(void *handle,
				       uint64_t new_mask)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_pp_feature_mask(smu, new_mask);
}

int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
{
	if (!smu->ppt_funcs->set_gfx_off_residency)
		return -EINVAL;

	return smu_set_gfx_off_residency(smu, value);
}

int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_residency)
		return -EINVAL;

	return smu_get_gfx_off_residency(smu, value);
}

int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_entrycount)
		return -EINVAL;

	return smu_get_gfx_off_entrycount(smu, value);
}

int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
{
	if (!smu->ppt_funcs->get_gfx_off_status)
		return -EINVAL;

	*value = smu_get_gfx_off_status(smu);

	return 0;
}

int smu_set_soft_freq_range(struct smu_context *smu,
			    enum pp_clock_type type,
			    uint32_t min,
			    uint32_t max)
{
	enum smu_clk_type clk_type;
	int ret = 0;

	clk_type = smu_convert_to_smuclk(type);
	if (clk_type == SMU_CLK_COUNT)
		return -EINVAL;

	if (smu->ppt_funcs->set_soft_freq_limited_range)
		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
								  clk_type,
								  min,
								  max,
								  false);

	return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{
	int ret = -ENOTSUPP;

	if (!min && !max)
		return -EINVAL;

	if (smu->ppt_funcs->get_dpm_ultimate_freq)
		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
							    clk_type,
							    min,
							    max);

	return ret;
}
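/* Request that the PMFW power up the GFX block through the IMU. */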
int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
		ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
		if (ret)
			dev_err(adev->dev, "Failed to enable gfx imu!\n");
	}
	return ret;
}

static u32 smu_get_mclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static u32 smu_get_sclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static int smu_set_gfx_imu_enable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
		return 0;

	return smu_set_gfx_power_up_by_imu(smu);
}

static bool is_vcn_enabled(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_VCN ||
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_JPEG) &&
		    !adev->ip_blocks[i].status.valid)
			return false;
	}

	return true;
}
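/*
 * The *_gated atomics below cache the current gating state of each
 * multimedia block so that redundant enable/disable requests are not
 * sent to the PMFW.
 */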
static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable,
				  int inst)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	/*
	 * don't power on vcn/jpeg when they are skipped.
	 */
	if (!is_vcn_enabled(smu->adev))
		return 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

	if (atomic_read(&power_gate->vcn_gated[inst]) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, inst);
	if (!ret)
		atomic_set(&power_gate->vcn_gated[inst], !enable);

	return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!is_vcn_enabled(smu->adev))
		return 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

	return ret;
}

static int smu_dpm_set_vpe_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vpe_enable)
		return 0;

	if (atomic_read(&power_gate->vpe_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vpe_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vpe_gated, !enable);

	return ret;
}

static int smu_dpm_set_isp_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret;

	if (!smu->ppt_funcs->dpm_set_isp_enable)
		return 0;

	if (atomic_read(&power_gate->isp_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_isp_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->isp_gated, !enable);

	return ret;
}

static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
				       bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->adev->enable_umsch_mm)
		return 0;

	if (!smu->ppt_funcs->dpm_set_umsch_mm_enable)
		return 0;

	if (atomic_read(&power_gate->umsch_mm_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_umsch_mm_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->umsch_mm_gated, !enable);

	return ret;
}

static int smu_set_mall_enable(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->ppt_funcs->set_mall_enable)
		return 0;

	ret = smu->ppt_funcs->set_mall_enable(smu);

	return ret;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @handle: smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate: to power gate if true, ungate otherwise
 * @inst: the instance of the IP block to power gate/ungate
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    which guarantee race-condition-free access on their side.
 * 2. Or it gets called on a user request to change
 *    power_dpm_force_performance_level. In that case, the smu->mutex
 *    protection is already enforced in the parent API
 *    smu_force_performance_level of the call path.
 */
static int smu_dpm_set_power_gate(void *handle,
				  uint32_t block_type,
				  bool gate,
				  int inst)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
		dev_WARN(smu->adev->dev,
			 "SMU uninitialized but power %s requested for %u!\n",
			 gate ? "gate" : "ungate", block_type);
		return -EOPNOTSUPP;
	}

	switch (block_type) {
	/*
	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
	 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
	 */
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate, inst);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN instance %d!\n",
				gate ? "gate" : "ungate", inst);
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
				gate ? "enable" : "disable");
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_VPE:
		ret = smu_dpm_set_vpe_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_ISP:
		ret = smu_dpm_set_isp_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s ISP!\n",
				gate ? "gate" : "ungate");
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		return -EINVAL;
	}

	return ret;
}

/**
 * smu_set_user_clk_dependencies - set user profile clock dependencies
 *
 * @smu: smu_context pointer
 * @clk: enum smu_clk_type type
 *
 * Enable/Disable the clock dependency for the @clk type.
 */
static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
{
	if (smu->adev->in_suspend)
		return;

	if (clk == SMU_MCLK) {
		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_FCLK) {
		/* MCLK takes precedence over FCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_SOCCLK) {
		/* MCLK takes precedence over SOCCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
	} else
		/* Add clk dependencies here, if any */
		return;
}

/**
 * smu_restore_dpm_user_profile - reinstate user dpm profile
 *
 * @smu: smu_context pointer
 *
 * Restore the saved user power configurations, including power limit,
 * clock frequencies, fan control mode and fan speed.
 */
static void smu_restore_dpm_user_profile(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->adev->in_suspend)
		return;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return;

	/* Enable restore flag */
	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;

	/* set the user dpm power limits */
	for (int i = SMU_DEFAULT_PPT_LIMIT; i < SMU_LIMIT_TYPE_COUNT; i++) {
		if (!smu->user_dpm_profile.power_limits[i])
			continue;
		ret = smu_set_power_limit(smu, i,
					  smu->user_dpm_profile.power_limits[i]);
		if (ret)
			dev_err(smu->adev->dev, "Failed to set %d power limit value\n", i);
	}

	/* set the user dpm clock configurations */
	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		enum smu_clk_type clk_type;

		for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
			/*
			 * Iterate over smu clk type and force the saved user clk
			 * configs, skip if clock dependency is enabled
			 */
			if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
			    smu->user_dpm_profile.clk_mask[clk_type]) {
				ret = smu_force_smuclk_levels(smu, clk_type,
							      smu->user_dpm_profile.clk_mask[clk_type]);
				if (ret)
					dev_err(smu->adev->dev,
						"Failed to set clock type = %d\n", clk_type);
			}
		}
	}

	/* set the user dpm fan configurations */
	if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
	    smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
		ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
		if (ret != -EOPNOTSUPP) {
			smu->user_dpm_profile.fan_speed_pwm = 0;
			smu->user_dpm_profile.fan_speed_rpm = 0;
			smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
			dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
		}

		if (smu->user_dpm_profile.fan_speed_pwm) {
			ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
			if (ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
		}

		if (smu->user_dpm_profile.fan_speed_rpm) {
			ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
			if (ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
		}
	}

	/* Restore user customized OD settings */
	if (smu->user_dpm_profile.user_od) {
		if (smu->ppt_funcs->restore_user_od_settings) {
			ret = smu->ppt_funcs->restore_user_od_settings(smu);
			if (ret)
				dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
		}
	}

	/* Disable restore flag */
	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
}

static int smu_get_power_num_states(void *handle,
				    struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
	/* vega20 is 11.0.2, but it's supported via the powerplay code */
	if (adev->asic_type == CHIP_VEGA20)
		return false;

	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) &&
	    amdgpu_device_ip_is_valid(adev, AMD_IP_BLOCK_TYPE_SMC))
		return true;

	return false;
}
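/* CCLK (CPU clock) DPM support is reported based on the corresponding SMU feature bit. */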
bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
		return false;

	return true;
}

int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg,
			    uint32_t param, uint32_t *read_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (smu->ppt_funcs && smu->ppt_funcs->ras_send_msg)
		ret = smu->ppt_funcs->ras_send_msg(smu, msg, param, read_arg);

	return ret;
}

static int smu_sys_get_pp_table(void *handle,
				char **table)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}

static int smu_sys_set_pp_table(void *handle,
				const char *buf,
				size_t size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size not matched !\n");
		return -EIO;
	}

	if (!smu_table->hardcode_pptable || smu_table->power_play_table_size < size) {
		kfree(smu_table->hardcode_pptable);
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
		if (!smu_table->hardcode_pptable)
			return -ENOMEM;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * Special hw_fini action (for Navi1x, the DPMs disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

	return ret;
}

static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
	int ret = 0;

	/*
	 * With SCPM enabled, the allowed feature mask setting (via
	 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
	 * That means there is no way to let the PMFW know the settings below.
	 * Thus, we just assume all the features are allowed under
	 * such a scenario.
	 */
	if (smu->adev->scpm_enabled) {
		bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
		return 0;
	}

	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
		navi10_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		renoir_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 2):
		vangogh_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 8):
		yellow_carp_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 11):
		smu_v13_0_4_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 5):
		smu_v13_0_5_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 8):
		cyan_skillfish_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 2):
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case IP_VERSION(13, 0, 2):
		aldebaran_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 10):
		smu_v13_0_0_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 14):
	case IP_VERSION(13, 0, 12):
		smu_v13_0_6_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 7):
		smu_v13_0_7_set_ppt_funcs(smu);
		break;
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
	case IP_VERSION(14, 0, 4):
	case IP_VERSION(14, 0, 5):
		smu_v14_0_0_set_ppt_funcs(smu);
		break;
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
		smu_v14_0_2_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu;
	int r;

	smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
	if (!smu)
		return -ENOMEM;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	smu->smu_baco.state = SMU_BACO_STATE_NONE;
	smu->smu_baco.platform_support = false;
	smu->smu_baco.maco_support = false;
	smu->user_dpm_profile.fan_mode = -1;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_UNKNOWN;

	mutex_init(&smu->message_lock);

	adev->powerplay.pp_handle = smu;
	adev->powerplay.pp_funcs = &swsmu_pm_funcs;

	r = smu_set_funcs(adev);
	if (r)
		return r;
	return smu_init_microcode(smu);
}
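/*
 * VCN and JPEG have to be ungated while the default DPM tables are
 * set up; the previous gating state is restored afterwards.
 */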
static int smu_set_default_dpm_table(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int vcn_gate[AMDGPU_MAX_VCN_INSTANCES], jpeg_gate, i;
	int ret = 0;

	if (!smu->ppt_funcs->set_default_dpm_table)
		return 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
			vcn_gate[i] = atomic_read(&power_gate->vcn_gated[i]);
	}
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
		jpeg_gate = atomic_read(&power_gate->jpeg_gated);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			ret = smu_dpm_set_vcn_enable(smu, true, i);
			if (ret)
				return ret;
		}
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		ret = smu_dpm_set_jpeg_enable(smu, true);
		if (ret)
			goto err_out;
	}

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
		smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
err_out:
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
			smu_dpm_set_vcn_enable(smu, !vcn_gate[i], i);
	}

	return ret;
}

static int smu_apply_default_config_table_settings(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_get_default_config_table_settings(smu,
						    &adev->pm.config_table);
	if (ret)
		return ret;

	return smu_set_config_table(smu, &adev->pm.config_table);
}

static int smu_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	smu_set_fine_grain_gfx_freq_parameters(smu);

	if (!smu->pm_enabled)
		return 0;

	ret = smu_post_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to post smu init!\n");
		return ret;
	}

	/*
	 * Explicitly notify the PMFW of the power mode the system is in,
	 * since the PMFW may have booted the ASIC with a different mode.
	 * For ASICs supporting the AC/DC switch via gpio, the PMFW will
	 * handle the switch automatically and driver involvement
	 * is unnecessary.
	 */
	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
	smu_set_ac_dc(smu);

	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
	    (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
		return 0;

	if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
		ret = smu_set_default_od_settings(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup default OD settings!\n");
			return ret;
		}
	}

	ret = smu_populate_umd_state_clk(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
		return ret;
	}

	ret = smu_get_asic_power_limits(smu,
					&smu->current_power_limit,
					&smu->default_power_limit,
					&smu->max_power_limit,
					&smu->min_power_limit);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;
	}

	if (!amdgpu_sriov_vf(adev))
		smu_get_unique_id(smu);

	smu_get_fan_parameters(smu);

	smu_handle_task(smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);

	ret = smu_apply_default_config_table_settings(smu);
	if (ret && (ret != -EOPNOTSUPP)) {
		dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
		return ret;
	}

	smu_restore_dpm_user_profile(smu);

	return 0;
}

static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		/* If one of the tables has VRAM domain restriction, keep it in
		 * VRAM
		 */
		if ((tables[i].domain &
		    (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) ==
			    AMDGPU_GEM_DOMAIN_VRAM)
			driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}
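/* Free the tool (PMSTATUSLOG) and driver table BOs created in smu_init_fb_allocations(). */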
static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}

static void smu_update_gpu_addresses(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *pm_status_table = smu_table->tables + SMU_TABLE_PMSTATUSLOG;
	struct smu_table *driver_table = &(smu_table->driver_table);
	struct smu_table *dummy_read_1_table = &smu_table->dummy_read_1_table;

	if (pm_status_table->bo)
		pm_status_table->mc_address = amdgpu_bo_fb_aper_addr(pm_status_table->bo);
	if (driver_table->bo)
		driver_table->mc_address = amdgpu_bo_fb_aper_addr(driver_table->bo);
	if (dummy_read_1_table->bo)
		dummy_read_1_table->mc_address = amdgpu_bo_fb_aper_addr(dummy_read_1_table->bo);
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is for SMC use; the SetSystemVirtualDramAddr and
 * DramLogSetDramAddr messages notify the SMC of its location.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain =
		(adev->pm.smu_debug_mask & SMU_DEBUG_POOL_USE_VRAM) ?
		AMDGPU_GEM_DOMAIN_VRAM :
		AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		if (ret)
			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}

static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!dummy_read_1_table->size)
		return 0;

	ret = amdgpu_bo_create_kernel(adev,
				      dummy_read_1_table->size,
				      dummy_read_1_table->align,
				      dummy_read_1_table->domain,
				      &dummy_read_1_table->bo,
				      &dummy_read_1_table->mc_address,
				      &dummy_read_1_table->cpu_addr);
	if (ret)
		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

	return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;


	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
			      &dummy_read_1_table->mc_address,
			      &dummy_read_1_table->cpu_addr);

	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/**
	 * Create smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
		return ret;
	}

	/**
	 * Create smu_power_context structure, and allocate smu_dpm_context and
	 * context size to fill the smu_power_context data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu_init_power!\n");
		return ret;
	}

	/*
	 * allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_alloc_dummy_read_table(smu);
	if (ret)
		return ret;

	ret = smu_i2c_init(smu);
	if (ret)
		return ret;

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	smu_i2c_fini(smu);

	smu_free_dummy_read_table(smu);

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu_fini_power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n");
		return ret;
	}

	return 0;
}

static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}

static void smu_interrupt_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       interrupt_work);

	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
		smu->ppt_funcs->interrupt_work(smu);
}

static void smu_swctf_delayed_work_handler(struct work_struct *work)
{
	struct smu_context *smu =
		container_of(work, struct smu_context, swctf_delayed_work.work);
	struct smu_temperature_range *range =
				&smu->thermal_range;
	struct amdgpu_device *adev = smu->adev;
	uint32_t hotspot_tmp, size;

	/*
	 * If the hotspot temperature is confirmed to be below the SW CTF
	 * setting point after the enforced delay, nothing will be done.
	 * Otherwise, a graceful shutdown will be performed to prevent further damage.
	 */
	if (range->software_shutdown_temp &&
	    smu->ppt_funcs->read_sensor &&
	    !smu->ppt_funcs->read_sensor(smu,
					 AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					 &hotspot_tmp,
					 &size) &&
	    hotspot_tmp / 1000 < range->software_shutdown_temp)
		return;

	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
	orderly_poweroff(true);
}

static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
{
	struct smu_dpm_context *dpm_ctxt = &(smu->smu_dpm);
	struct smu_dpm_policy_ctxt *policy_ctxt;
	struct smu_dpm_policy *policy;

	policy = smu_get_pm_policy(smu, PP_PM_POLICY_XGMI_PLPD);
	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
		if (policy)
			policy->current_level = XGMI_PLPD_DEFAULT;
		return;
	}

	/* PMFW puts PLPD into the default policy after enabling the feature */
	if (smu_feature_is_enabled(smu,
				   SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT)) {
		if (policy)
			policy->current_level = XGMI_PLPD_DEFAULT;
	} else {
		policy_ctxt = dpm_ctxt->dpm_policies;
		if (policy_ctxt)
			policy_ctxt->policy_mask &=
				~BIT(PP_PM_POLICY_XGMI_PLPD);
	}
}

static void smu_init_power_profile(struct smu_context *smu)
{
	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN)
		smu->power_profile_mode =
			PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu_power_profile_mode_get(smu, smu->power_profile_mode);
}

void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id)
{
	struct smu_feature_cap *fea_cap = &smu->fea_cap;

	if (fea_id >= SMU_FEATURE_CAP_ID__COUNT)
		return;

	set_bit(fea_id, fea_cap->cap_map);
}

bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id)
{
	struct smu_feature_cap *fea_cap = &smu->fea_cap;

	if (fea_id >= SMU_FEATURE_CAP_ID__COUNT)
		return false;

	return test_bit(fea_id, fea_cap->cap_map);
}

static void smu_feature_cap_init(struct smu_context *smu)
{
	struct smu_feature_cap *fea_cap = &smu->fea_cap;

	bitmap_zero(fea_cap->cap_map, SMU_FEATURE_CAP_ID__COUNT);
}

static int smu_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int i, ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
	atomic64_set(&smu->throttle_int_counter, 0);
	smu->watermarks_bitmap = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1);
	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
	atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
	atomic_set(&smu->smu_power.power_gate.isp_gated, 1);
	atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);

	smu_init_power_profile(smu);
	smu->display_config = &adev->pm.pm_display_cfg;

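	/* Both the current and the requested DPM level default to auto. */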
	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	INIT_DELAYED_WORK(&smu->swctf_delayed_work,
			  smu_swctf_delayed_work_handler);

	smu_feature_cap_init(smu);

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");
		return ret;
	}

	/* get boot_values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_init_pptable_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable firmware!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to register smc irq handler!\n");
		return ret;
	}

	/* If there is no way to query fan control mode, fan control is not supported */
	if (!smu->ppt_funcs->get_fan_control_mode)
		smu->adev->pm.no_fan = true;

	return 0;
}

static int smu_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw fini smc table!\n");
		return ret;
	}

	if (smu->custom_profile_params) {
		kfree(smu->custom_profile_params);
		smu->custom_profile_params = NULL;
	}

	smu_fini_microcode(smu);

	return 0;
}

static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_temperature_range *range =
				&smu->thermal_range;
	int ret = 0;

	if (!smu->ppt_funcs->get_thermal_temperature_range)
		return 0;

	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
	if (ret)
		return ret;

	adev->pm.dpm.thermal.min_temp = range->min;
	adev->pm.dpm.thermal.max_temp = range->max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

	return ret;
}

/**
 * smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges
 *
 * @smu: smu_context pointer
 *
 * Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling.
 * Returns 0 on success, error on failure.
 */
static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu)
{
	struct wbrf_ranges_in_out wbrf_exclusion = {0};
	struct freq_band_range *wifi_bands = wbrf_exclusion.band_list;
	struct amdgpu_device *adev = smu->adev;
	uint32_t num_of_wbrf_ranges = MAX_NUM_OF_WBRF_RANGES;
	uint64_t start, end;
	int ret, i, j;

	ret = amd_wbrf_retrieve_freq_band(adev->dev, &wbrf_exclusion);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve exclusion ranges!\n");
		return ret;
	}

	/*
	 * The exclusion ranges array we got might be filled with holes and duplicate
	 * entries. For example:
	 * {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500), (0, 0), (6117, 6189), (0, 0)...}
	 * We need to do some sorting to eliminate those holes and duplicate entries.
	 * Expected output: {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...}
	 */
	for (i = 0; i < num_of_wbrf_ranges; i++) {
		start = wifi_bands[i].start;
		end = wifi_bands[i].end;

		/* get the last valid entry to fill the intermediate hole */
		if (!start && !end) {
			for (j = num_of_wbrf_ranges - 1; j > i; j--)
				if (wifi_bands[j].start && wifi_bands[j].end)
					break;

			/* no valid entry left */
			if (j <= i)
				break;

			start = wifi_bands[i].start = wifi_bands[j].start;
			end = wifi_bands[i].end = wifi_bands[j].end;
			wifi_bands[j].start = 0;
			wifi_bands[j].end = 0;
			num_of_wbrf_ranges = j;
		}

		/* eliminate duplicate entries */
		for (j = i + 1; j < num_of_wbrf_ranges; j++) {
			if ((wifi_bands[j].start == start) && (wifi_bands[j].end == end)) {
				wifi_bands[j].start = 0;
				wifi_bands[j].end = 0;
			}
		}
	}

	/* Send the sorted wifi_bands to PMFW */
	ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
	/* Try to set the wifi_bands again */
	if (unlikely(ret == -EBUSY)) {
		mdelay(5);
		ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
	}

	return ret;
}

/**
 * smu_wbrf_event_handler - handle notify events
 *
 * @nb: notifier block
 * @action: event type
 * @_arg: event data
 *
 * Calls relevant amdgpu function in response to wbrf event
 * notification from kernel.
 */
static int smu_wbrf_event_handler(struct notifier_block *nb,
				  unsigned long action, void *_arg)
{
	struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier);

	switch (action) {
	case WBRF_CHANGED:
		schedule_delayed_work(&smu->wbrf_delayed_work,
				      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

/**
 * smu_wbrf_delayed_work_handler - callback on delayed work timer expired
 *
 * @work: struct work_struct pointer
 *
 * The flood of events is over and the driver will consume the latest
 * exclusion ranges.
 */
static void smu_wbrf_delayed_work_handler(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);

	smu_wbrf_handle_exclusion_ranges(smu);
}

/**
 * smu_wbrf_support_check - check wbrf support
 *
 * @smu: smu_context pointer
 *
 * Verifies through the ACPI interface whether wbrf is supported.
 */
static void smu_wbrf_support_check(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf &&
		acpi_amd_wbrf_supported_consumer(adev->dev);

	if (smu->wbrf_supported)
		dev_info(adev->dev, "RF interference mitigation is supported\n");
}

/**
 * smu_wbrf_init - init driver wbrf support
 *
 * @smu: smu_context pointer
 *
 * Verifies the AMD ACPI interfaces and registers with the wbrf
 * notifier chain if wbrf feature is supported.
 * Returns 0 on success, error on failure.
 */
static int smu_wbrf_init(struct smu_context *smu)
{
	int ret;

	if (!smu->wbrf_supported)
		return 0;

	INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler);

	smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
	ret = amd_wbrf_register_notifier(&smu->wbrf_notifier);
	if (ret)
		return ret;

	/*
	 * Some wifiband exclusion ranges may already be there before our
	 * driver loaded. Schedule the work to make sure our driver is
	 * aware of those exclusion ranges.
	 */
	schedule_delayed_work(&smu->wbrf_delayed_work,
			      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));

	return 0;
}

/**
 * smu_wbrf_fini - tear down driver wbrf support
 *
 * @smu: smu_context pointer
 *
 * Unregisters with the wbrf notifier chain.
 */
static void smu_wbrf_fini(struct smu_context *smu)
{
	if (!smu->wbrf_supported)
		return;

	amd_wbrf_unregister_notifier(&smu->wbrf_notifier);

	cancel_delayed_work_sync(&smu->wbrf_delayed_work);
}

static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	struct amdgpu_device *adev = smu->adev;
	uint8_t pcie_gen = 0, pcie_width = 0;
	uint64_t features_supported;
	int ret = 0;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 0, 12):
		if (adev->in_suspend && smu_is_dpm_running(smu)) {
			dev_info(adev->dev, "dpm has been enabled\n");
			ret = smu_system_features_control(smu, true);
			if (ret)
				dev_err(adev->dev, "Failed system features control!\n");
			return ret;
		}
		break;
	default:
		break;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret) {
		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
		return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
		return ret;
	}

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
		return ret;
	}

	/*
	 * The SetSystemVirtualDramAddr and DramLogSetDramAddr messages are
	 * used to notify the SMU of the memory pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	/*
	 * It is assumed the pptable used before runpm is the same as
	 * the one used afterwards. Thus, we can reuse the stored
	 * copy and do not need to set up the pptable again.
	 */
	if (!adev->in_runpm) {
		ret = smu_setup_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup pptable!\n");
			return ret;
		}
	}

	/* smu_dump_pptable(smu); */

	/*
	 * With SCPM enabled, PSP is responsible for the PPTable transferring
	 * (to SMU). Driver involvement is neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		/*
		 * Copy pptable bo in the vram to smc with SMU MSGs such as
		 * SetDriverDramAddr and TransferTableDram2Smu.
		 */
		ret = smu_write_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
			return ret;
		}
	}

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	/* Enable UclkShadow if wbrf is supported */
	if (smu->wbrf_supported) {
		ret = smu_enable_uclk_shadow(smu, true);
		if (ret) {
			dev_err(adev->dev, "Failed to enable UclkShadow feature to support wbrf!\n");
			return ret;
		}
	}

	/*
	 * With SCPM enabled, these actions (and relevant messages) are
	 * neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		ret = smu_feature_set_allowed_mask(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
			return ret;
		}
	}

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)
		pcie_gen = 4;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

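	/* pcie_gen now holds the highest speed advertised in pcie_gen_mask: 0 = GEN1 ... 4 = GEN5. */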
	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 4 corresponds to GEN1 to GEN5
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32)
		pcie_width = 7;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;
	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
	if (ret) {
		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
		return ret;
	}

	ret = smu_system_features_control(smu, true);
	if (ret) {
		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
		return ret;
	}

	smu_init_xgmi_plpd_mode(smu);

	ret = smu_feature_get_enabled_mask(smu, &features_supported);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
		return ret;
	}
	bitmap_copy(feature->supported,
		    (unsigned long *)&features_supported,
		    feature->feature_num);

	if (!smu_is_dpm_running(smu))
		dev_info(adev->dev, "dpm has been disabled\n");

	/*
	 * Set initialized values (get from vbios) to dpm tables context such as
	 * gfxclk, memclk, dcefclk, etc. And enable the DPM feature for each
	 * type of clks.
	 */
	ret = smu_set_default_dpm_table(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
		return ret;
	}

	ret = smu_get_thermal_temperature_range(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
		return ret;
	}

	ret = smu_enable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to enable thermal alert!\n");
		return ret;
	}

	ret = smu_notify_display_change(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to notify display change!\n");
		return ret;
	}

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu,
					  smu->smu_table.boot_values.dcefclk / 100);
	if (ret) {
		dev_err(adev->dev, "Error setting min deepsleep dcefclk\n");
		return ret;
	}

	/* Init wbrf support. Properly set up the notifier */
	ret = smu_wbrf_init(smu);
	if (ret)
		dev_err(adev->dev, "Error during wbrf init call\n");

	return ret;
}

static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (amdgpu_virt_xgmi_migrate_enabled(adev))
		smu_update_gpu_addresses(smu);

	smu->smc_fw_state = SMU_FW_INIT;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	/*
	 * Send msg GetDriverIfVersion to check if the return value is equal
	 * to the DRIVER_IF_VERSION of the smc header.
	 */
	ret = smu_check_fw_version(smu);
	if (ret)
		return ret;

	return ret;
}

static int smu_hw_init(struct amdgpu_ip_block *ip_block)
{
	int i, ret;
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_multi_vf_mode(adev)) {
		smu->pm_enabled = false;
		return 0;
	}

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	/*
	 * Check whether wbrf is supported. This needs to be done
	 * before SMU setup starts since part of SMU configuration
	 * relies on this.
	 */
	smu_wbrf_support_check(smu);

	if (smu->is_apu) {
		ret = smu_set_gfx_imu_enable(smu);
		if (ret)
			return ret;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
			smu_dpm_set_vcn_enable(smu, true, i);
		smu_dpm_set_jpeg_enable(smu, true);
		smu_dpm_set_umsch_mm_enable(smu, true);
		smu_set_mall_enable(smu);
		smu_set_gfx_cgpg(smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	ret = smu_get_driver_allowed_feature_mask(smu);
	if (ret)
		return ret;

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	/*
	 * Move maximum sustainable clock retrieving here considering
	 * 1. It is not needed on resume (from S3).
	 * 2. DAL settings come between .hw_init and .late_init of SMU.
	 *    And DAL needs to know the maximum sustainable clocks. Thus
	 *    it cannot be put in .late_init().
	 */
	ret = smu_init_max_sustainable_clocks(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
		return ret;
	}

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is initialized successfully!\n");

	return 0;
}

static int smu_disable_dpms(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
	bool use_baco = !smu->is_apu &&
		((amdgpu_in_reset(adev) &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));

	/*
	 * For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features (disablement or others)
	 * properly on suspend/reset/unload. Driver involvement may cause some unexpected issues.
1992 */ 1993 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 1994 case IP_VERSION(13, 0, 0): 1995 case IP_VERSION(13, 0, 7): 1996 case IP_VERSION(13, 0, 10): 1997 case IP_VERSION(14, 0, 2): 1998 case IP_VERSION(14, 0, 3): 1999 return 0; 2000 default: 2001 break; 2002 } 2003 2004 /* 2005 * For custom pptable uploading, skip the DPM features 2006 * disable process on Navi1x ASICs. 2007 * - As the gfx related features are under control of 2008 * RLC on those ASICs. RLC reinitialization will be 2009 * needed to reenable them. That will cost much more 2010 * efforts. 2011 * 2012 * - SMU firmware can handle the DPM reenablement 2013 * properly. 2014 */ 2015 if (smu->uploading_custom_pp_table) { 2016 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 2017 case IP_VERSION(11, 0, 0): 2018 case IP_VERSION(11, 0, 5): 2019 case IP_VERSION(11, 0, 9): 2020 case IP_VERSION(11, 0, 7): 2021 case IP_VERSION(11, 0, 11): 2022 case IP_VERSION(11, 5, 0): 2023 case IP_VERSION(11, 5, 2): 2024 case IP_VERSION(11, 0, 12): 2025 case IP_VERSION(11, 0, 13): 2026 return 0; 2027 default: 2028 break; 2029 } 2030 } 2031 2032 /* 2033 * For Sienna_Cichlid, PMFW will handle the features disablement properly 2034 * on BACO in. Driver involvement is unnecessary. 2035 */ 2036 if (use_baco) { 2037 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 2038 case IP_VERSION(11, 0, 7): 2039 case IP_VERSION(11, 0, 0): 2040 case IP_VERSION(11, 0, 5): 2041 case IP_VERSION(11, 0, 9): 2042 case IP_VERSION(13, 0, 7): 2043 return 0; 2044 default: 2045 break; 2046 } 2047 } 2048 2049 /* 2050 * For GFX11 and subsequent APUs, PMFW will handle the features disablement properly 2051 * for gpu reset and S0i3 cases. Driver involvement is unnecessary. 2052 */ 2053 if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) >= 11 && 2054 smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix)) 2055 return 0; 2056 2057 /* vangogh s0ix */ 2058 if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 0) || 2059 amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 2)) && 2060 adev->in_s0ix) 2061 return 0; 2062 2063 /* 2064 * For gpu reset, runpm and hibernation through BACO, 2065 * BACO feature has to be kept enabled. 2066 */ 2067 if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) { 2068 ret = smu_disable_all_features_with_exception(smu, 2069 SMU_FEATURE_BACO_BIT); 2070 if (ret) 2071 dev_err(adev->dev, "Failed to disable smu features except BACO.\n"); 2072 } else { 2073 /* DisableAllSmuFeatures message is not permitted with SCPM enabled */ 2074 if (!adev->scpm_enabled) { 2075 ret = smu_system_features_control(smu, false); 2076 if (ret) 2077 dev_err(adev->dev, "Failed to disable smu features.\n"); 2078 } 2079 } 2080 2081 /* Notify SMU RLC is going to be off, stop RLC and SMU interaction. 2082 * otherwise SMU will hang while interacting with RLC if RLC is halted 2083 * this is a WA for Vangogh asic which fix the SMU hang issue. 
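 * The notification below is therefore sent before the RLC is halted further down in this function.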
2084 */ 2085 ret = smu_notify_rlc_state(smu, false); 2086 if (ret) { 2087 dev_err(adev->dev, "Fail to notify rlc status!\n"); 2088 return ret; 2089 } 2090 2091 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) && 2092 !((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) && 2093 !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop) 2094 adev->gfx.rlc.funcs->stop(adev); 2095 2096 return ret; 2097 } 2098 2099 static int smu_smc_hw_cleanup(struct smu_context *smu) 2100 { 2101 struct amdgpu_device *adev = smu->adev; 2102 int ret = 0; 2103 2104 smu_wbrf_fini(smu); 2105 2106 cancel_work_sync(&smu->throttling_logging_work); 2107 cancel_work_sync(&smu->interrupt_work); 2108 2109 ret = smu_disable_thermal_alert(smu); 2110 if (ret) { 2111 dev_err(adev->dev, "Fail to disable thermal alert!\n"); 2112 return ret; 2113 } 2114 2115 cancel_delayed_work_sync(&smu->swctf_delayed_work); 2116 2117 ret = smu_disable_dpms(smu); 2118 if (ret) { 2119 dev_err(adev->dev, "Fail to disable dpm features!\n"); 2120 return ret; 2121 } 2122 2123 return 0; 2124 } 2125 2126 static int smu_reset_mp1_state(struct smu_context *smu) 2127 { 2128 struct amdgpu_device *adev = smu->adev; 2129 int ret = 0; 2130 2131 if ((!adev->in_runpm) && (!adev->in_suspend) && 2132 (!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) == 2133 IP_VERSION(13, 0, 10) && 2134 !amdgpu_device_has_display_hardware(adev)) 2135 ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD); 2136 2137 return ret; 2138 } 2139 2140 static int smu_hw_fini(struct amdgpu_ip_block *ip_block) 2141 { 2142 struct amdgpu_device *adev = ip_block->adev; 2143 struct smu_context *smu = adev->powerplay.pp_handle; 2144 int i, ret; 2145 2146 if (amdgpu_sriov_multi_vf_mode(adev)) 2147 return 0; 2148 2149 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 2150 smu_dpm_set_vcn_enable(smu, false, i); 2151 adev->vcn.inst[i].cur_state = AMD_PG_STATE_GATE; 2152 } 2153 smu_dpm_set_jpeg_enable(smu, false); 2154 adev->jpeg.cur_state = AMD_PG_STATE_GATE; 2155 smu_dpm_set_umsch_mm_enable(smu, false); 2156 2157 if (!smu->pm_enabled) 2158 return 0; 2159 2160 adev->pm.dpm_enabled = false; 2161 2162 ret = smu_smc_hw_cleanup(smu); 2163 if (ret) 2164 return ret; 2165 2166 ret = smu_reset_mp1_state(smu); 2167 if (ret) 2168 return ret; 2169 2170 return 0; 2171 } 2172 2173 static void smu_late_fini(struct amdgpu_ip_block *ip_block) 2174 { 2175 struct amdgpu_device *adev = ip_block->adev; 2176 struct smu_context *smu = adev->powerplay.pp_handle; 2177 2178 kfree(smu); 2179 } 2180 2181 static int smu_reset(struct smu_context *smu) 2182 { 2183 struct amdgpu_device *adev = smu->adev; 2184 struct amdgpu_ip_block *ip_block; 2185 int ret; 2186 2187 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC); 2188 if (!ip_block) 2189 return -EINVAL; 2190 2191 ret = smu_hw_fini(ip_block); 2192 if (ret) 2193 return ret; 2194 2195 ret = smu_hw_init(ip_block); 2196 if (ret) 2197 return ret; 2198 2199 ret = smu_late_init(ip_block); 2200 if (ret) 2201 return ret; 2202 2203 return 0; 2204 } 2205 2206 static int smu_suspend(struct amdgpu_ip_block *ip_block) 2207 { 2208 struct amdgpu_device *adev = ip_block->adev; 2209 struct smu_context *smu = adev->powerplay.pp_handle; 2210 int ret; 2211 uint64_t count; 2212 2213 if (amdgpu_sriov_multi_vf_mode(adev)) 2214 return 0; 2215 2216 if (!smu->pm_enabled) 2217 return 0; 2218 2219 adev->pm.dpm_enabled = false; 2220 2221 ret = smu_smc_hw_cleanup(smu); 2222 if (ret) 2223 return ret; 2224 2225 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED); 2226 2227 
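/* Disable GFX CGPG across suspend; smu_resume() re-enables it. */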
smu_set_gfx_cgpg(smu, false); 2228 2229 /* 2230 * pwfw resets entrycount when device is suspended, so we save the 2231 * last value to be used when we resume to keep it consistent 2232 */ 2233 ret = smu_get_entrycount_gfxoff(smu, &count); 2234 if (!ret) 2235 adev->gfx.gfx_off_entrycount = count; 2236 2237 /* clear this on suspend so it will get reprogrammed on resume */ 2238 smu->workload_mask = 0; 2239 2240 return 0; 2241 } 2242 2243 static int smu_resume(struct amdgpu_ip_block *ip_block) 2244 { 2245 int ret; 2246 struct amdgpu_device *adev = ip_block->adev; 2247 struct smu_context *smu = adev->powerplay.pp_handle; 2248 2249 if (amdgpu_sriov_multi_vf_mode(adev)) 2250 return 0; 2251 2252 if (!smu->pm_enabled) 2253 return 0; 2254 2255 dev_info(adev->dev, "SMU is resuming...\n"); 2256 2257 ret = smu_start_smc_engine(smu); 2258 if (ret) { 2259 dev_err(adev->dev, "SMC engine is not correctly up!\n"); 2260 return ret; 2261 } 2262 2263 ret = smu_smc_hw_setup(smu); 2264 if (ret) { 2265 dev_err(adev->dev, "Failed to setup smc hw!\n"); 2266 return ret; 2267 } 2268 2269 ret = smu_set_gfx_imu_enable(smu); 2270 if (ret) 2271 return ret; 2272 2273 smu_set_gfx_cgpg(smu, true); 2274 2275 smu->disable_uclk_switch = 0; 2276 2277 adev->pm.dpm_enabled = true; 2278 2279 dev_info(adev->dev, "SMU is resumed successfully!\n"); 2280 2281 return 0; 2282 } 2283 2284 static int smu_display_configuration_change(void *handle, 2285 const struct amd_pp_display_configuration *display_config) 2286 { 2287 struct smu_context *smu = handle; 2288 2289 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2290 return -EOPNOTSUPP; 2291 2292 if (!display_config) 2293 return -EINVAL; 2294 2295 smu_set_min_dcef_deep_sleep(smu, 2296 display_config->min_dcef_deep_sleep_set_clk / 100); 2297 2298 return 0; 2299 } 2300 2301 static int smu_set_clockgating_state(struct amdgpu_ip_block *ip_block, 2302 enum amd_clockgating_state state) 2303 { 2304 return 0; 2305 } 2306 2307 static int smu_set_powergating_state(struct amdgpu_ip_block *ip_block, 2308 enum amd_powergating_state state) 2309 { 2310 return 0; 2311 } 2312 2313 static int smu_enable_umd_pstate(void *handle, 2314 enum amd_dpm_forced_level *level) 2315 { 2316 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 2317 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 2318 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 2319 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 2320 2321 struct smu_context *smu = (struct smu_context*)(handle); 2322 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 2323 2324 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 2325 return -EINVAL; 2326 2327 if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) { 2328 /* enter umd pstate, save current level, disable gfx cg*/ 2329 if (*level & profile_mode_mask) { 2330 smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level; 2331 smu_gpo_control(smu, false); 2332 smu_gfx_ulv_control(smu, false); 2333 smu_deep_sleep_control(smu, false); 2334 amdgpu_asic_update_umd_stable_pstate(smu->adev, true); 2335 } 2336 } else { 2337 /* exit umd pstate, restore level, enable gfx cg*/ 2338 if (!(*level & profile_mode_mask)) { 2339 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) 2340 *level = smu_dpm_ctx->saved_dpm_level; 2341 amdgpu_asic_update_umd_stable_pstate(smu->adev, false); 2342 smu_deep_sleep_control(smu, true); 2343 smu_gfx_ulv_control(smu, true); 2344 smu_gpo_control(smu, true); 2345 } 2346 } 2347 2348 return 0; 2349 } 2350 2351 static int smu_bump_power_profile_mode(struct smu_context *smu, 2352 long *custom_params, 2353 u32 
custom_params_max_idx) 2354 { 2355 u32 workload_mask = 0; 2356 int i, ret = 0; 2357 2358 for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) { 2359 if (smu->workload_refcount[i]) 2360 workload_mask |= 1 << i; 2361 } 2362 2363 if (smu->workload_mask == workload_mask) 2364 return 0; 2365 2366 if (smu->ppt_funcs->set_power_profile_mode) 2367 ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask, 2368 custom_params, 2369 custom_params_max_idx); 2370 2371 if (!ret) 2372 smu->workload_mask = workload_mask; 2373 2374 return ret; 2375 } 2376 2377 static void smu_power_profile_mode_get(struct smu_context *smu, 2378 enum PP_SMC_POWER_PROFILE profile_mode) 2379 { 2380 smu->workload_refcount[profile_mode]++; 2381 } 2382 2383 static void smu_power_profile_mode_put(struct smu_context *smu, 2384 enum PP_SMC_POWER_PROFILE profile_mode) 2385 { 2386 if (smu->workload_refcount[profile_mode]) 2387 smu->workload_refcount[profile_mode]--; 2388 } 2389 2390 static int smu_adjust_power_state_dynamic(struct smu_context *smu, 2391 enum amd_dpm_forced_level level, 2392 bool skip_display_settings) 2393 { 2394 int ret = 0; 2395 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 2396 2397 if (!skip_display_settings) { 2398 ret = smu_display_config_changed(smu); 2399 if (ret) { 2400 dev_err(smu->adev->dev, "Failed to change display config!"); 2401 return ret; 2402 } 2403 } 2404 2405 ret = smu_apply_clocks_adjust_rules(smu); 2406 if (ret) { 2407 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!"); 2408 return ret; 2409 } 2410 2411 if (!skip_display_settings) { 2412 ret = smu_notify_smc_display_config(smu); 2413 if (ret) { 2414 dev_err(smu->adev->dev, "Failed to notify smc display config!"); 2415 return ret; 2416 } 2417 } 2418 2419 if (smu_dpm_ctx->dpm_level != level) { 2420 ret = smu_asic_set_performance_level(smu, level); 2421 if (ret) { 2422 if (ret == -EOPNOTSUPP) 2423 dev_info(smu->adev->dev, "set performance level %d not supported", 2424 level); 2425 else 2426 dev_err(smu->adev->dev, "Failed to set performance level %d", 2427 level); 2428 return ret; 2429 } 2430 2431 /* update the saved copy */ 2432 smu_dpm_ctx->dpm_level = level; 2433 } 2434 2435 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && 2436 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) 2437 smu_bump_power_profile_mode(smu, NULL, 0); 2438 2439 return ret; 2440 } 2441 2442 static int smu_handle_task(struct smu_context *smu, 2443 enum amd_dpm_forced_level level, 2444 enum amd_pp_task task_id) 2445 { 2446 int ret = 0; 2447 2448 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2449 return -EOPNOTSUPP; 2450 2451 switch (task_id) { 2452 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: 2453 ret = smu_pre_display_config_changed(smu); 2454 if (ret) 2455 return ret; 2456 ret = smu_adjust_power_state_dynamic(smu, level, false); 2457 break; 2458 case AMD_PP_TASK_COMPLETE_INIT: 2459 ret = smu_adjust_power_state_dynamic(smu, level, true); 2460 break; 2461 case AMD_PP_TASK_READJUST_POWER_STATE: 2462 ret = smu_adjust_power_state_dynamic(smu, level, true); 2463 break; 2464 default: 2465 break; 2466 } 2467 2468 return ret; 2469 } 2470 2471 static int smu_handle_dpm_task(void *handle, 2472 enum amd_pp_task task_id, 2473 enum amd_pm_state_type *user_state) 2474 { 2475 struct smu_context *smu = handle; 2476 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 2477 2478 return smu_handle_task(smu, smu_dpm->dpm_level, task_id); 2479 2480 } 2481 2482 static int smu_switch_power_profile(void *handle, 2483 enum PP_SMC_POWER_PROFILE type, 
2484 bool enable) 2485 { 2486 struct smu_context *smu = handle; 2487 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 2488 int ret; 2489 2490 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2491 return -EOPNOTSUPP; 2492 2493 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM)) 2494 return -EINVAL; 2495 2496 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && 2497 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { 2498 if (enable) 2499 smu_power_profile_mode_get(smu, type); 2500 else 2501 smu_power_profile_mode_put(smu, type); 2502 /* don't switch the active workload when paused */ 2503 if (smu->pause_workload) 2504 ret = 0; 2505 else 2506 ret = smu_bump_power_profile_mode(smu, NULL, 0); 2507 if (ret) { 2508 if (enable) 2509 smu_power_profile_mode_put(smu, type); 2510 else 2511 smu_power_profile_mode_get(smu, type); 2512 return ret; 2513 } 2514 } 2515 2516 return 0; 2517 } 2518 2519 static int smu_pause_power_profile(void *handle, 2520 bool pause) 2521 { 2522 struct smu_context *smu = handle; 2523 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 2524 u32 workload_mask = 1 << PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 2525 int ret; 2526 2527 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2528 return -EOPNOTSUPP; 2529 2530 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && 2531 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { 2532 smu->pause_workload = pause; 2533 2534 /* force to bootup default profile */ 2535 if (smu->pause_workload && smu->ppt_funcs->set_power_profile_mode) 2536 ret = smu->ppt_funcs->set_power_profile_mode(smu, 2537 workload_mask, 2538 NULL, 2539 0); 2540 else 2541 ret = smu_bump_power_profile_mode(smu, NULL, 0); 2542 return ret; 2543 } 2544 2545 return 0; 2546 } 2547 2548 static enum amd_dpm_forced_level smu_get_performance_level(void *handle) 2549 { 2550 struct smu_context *smu = handle; 2551 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 2552 2553 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2554 return -EOPNOTSUPP; 2555 2556 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 2557 return -EINVAL; 2558 2559 return smu_dpm_ctx->dpm_level; 2560 } 2561 2562 static int smu_force_performance_level(void *handle, 2563 enum amd_dpm_forced_level level) 2564 { 2565 struct smu_context *smu = handle; 2566 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 2567 int ret = 0; 2568 2569 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2570 return -EOPNOTSUPP; 2571 2572 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 2573 return -EINVAL; 2574 2575 ret = smu_enable_umd_pstate(smu, &level); 2576 if (ret) 2577 return ret; 2578 2579 ret = smu_handle_task(smu, level, 2580 AMD_PP_TASK_READJUST_POWER_STATE); 2581 2582 /* reset user dpm clock state */ 2583 if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { 2584 memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask)); 2585 smu->user_dpm_profile.clk_dependency = 0; 2586 } 2587 2588 return ret; 2589 } 2590 2591 static int smu_set_display_count(void *handle, uint32_t count) 2592 { 2593 struct smu_context *smu = handle; 2594 2595 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2596 return -EOPNOTSUPP; 2597 2598 return smu_init_display_count(smu, count); 2599 } 2600 2601 static int smu_force_smuclk_levels(struct smu_context *smu, 2602 enum smu_clk_type clk_type, 2603 uint32_t mask) 2604 { 2605 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 2606 int ret = 0; 2607 2608 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2609 
return -EOPNOTSUPP; 2610 2611 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { 2612 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n"); 2613 return -EINVAL; 2614 } 2615 2616 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) { 2617 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask); 2618 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 2619 smu->user_dpm_profile.clk_mask[clk_type] = mask; 2620 smu_set_user_clk_dependencies(smu, clk_type); 2621 } 2622 } 2623 2624 return ret; 2625 } 2626 2627 static int smu_force_ppclk_levels(void *handle, 2628 enum pp_clock_type type, 2629 uint32_t mask) 2630 { 2631 struct smu_context *smu = handle; 2632 enum smu_clk_type clk_type; 2633 2634 switch (type) { 2635 case PP_SCLK: 2636 clk_type = SMU_SCLK; break; 2637 case PP_MCLK: 2638 clk_type = SMU_MCLK; break; 2639 case PP_PCIE: 2640 clk_type = SMU_PCIE; break; 2641 case PP_SOCCLK: 2642 clk_type = SMU_SOCCLK; break; 2643 case PP_FCLK: 2644 clk_type = SMU_FCLK; break; 2645 case PP_DCEFCLK: 2646 clk_type = SMU_DCEFCLK; break; 2647 case PP_VCLK: 2648 clk_type = SMU_VCLK; break; 2649 case PP_VCLK1: 2650 clk_type = SMU_VCLK1; break; 2651 case PP_DCLK: 2652 clk_type = SMU_DCLK; break; 2653 case PP_DCLK1: 2654 clk_type = SMU_DCLK1; break; 2655 case OD_SCLK: 2656 clk_type = SMU_OD_SCLK; break; 2657 case OD_MCLK: 2658 clk_type = SMU_OD_MCLK; break; 2659 case OD_VDDC_CURVE: 2660 clk_type = SMU_OD_VDDC_CURVE; break; 2661 case OD_RANGE: 2662 clk_type = SMU_OD_RANGE; break; 2663 default: 2664 return -EINVAL; 2665 } 2666 2667 return smu_force_smuclk_levels(smu, clk_type, mask); 2668 } 2669 2670 /* 2671 * On system suspending or resetting, the dpm_enabled 2672 * flag will be cleared. So that those SMU services which 2673 * are not supported will be gated. 2674 * However, the mp1 state setting should still be granted 2675 * even if the dpm_enabled cleared. 
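 * That is why smu_set_mp1_state() below checks only pm_enabled and not dpm_enabled.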
2676 */ 2677 static int smu_set_mp1_state(void *handle, 2678 enum pp_mp1_state mp1_state) 2679 { 2680 struct smu_context *smu = handle; 2681 int ret = 0; 2682 2683 if (!smu->pm_enabled) 2684 return -EOPNOTSUPP; 2685 2686 if (smu->ppt_funcs && 2687 smu->ppt_funcs->set_mp1_state) 2688 ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state); 2689 2690 return ret; 2691 } 2692 2693 static int smu_set_df_cstate(void *handle, 2694 enum pp_df_cstate state) 2695 { 2696 struct smu_context *smu = handle; 2697 int ret = 0; 2698 2699 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2700 return -EOPNOTSUPP; 2701 2702 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate) 2703 return 0; 2704 2705 ret = smu->ppt_funcs->set_df_cstate(smu, state); 2706 if (ret) 2707 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n"); 2708 2709 return ret; 2710 } 2711 2712 int smu_write_watermarks_table(struct smu_context *smu) 2713 { 2714 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2715 return -EOPNOTSUPP; 2716 2717 return smu_set_watermarks_table(smu, NULL); 2718 } 2719 2720 static int smu_set_watermarks_for_clock_ranges(void *handle, 2721 struct pp_smu_wm_range_sets *clock_ranges) 2722 { 2723 struct smu_context *smu = handle; 2724 2725 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2726 return -EOPNOTSUPP; 2727 2728 if (smu->disable_watermark) 2729 return 0; 2730 2731 return smu_set_watermarks_table(smu, clock_ranges); 2732 } 2733 2734 int smu_set_ac_dc(struct smu_context *smu) 2735 { 2736 int ret = 0; 2737 2738 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2739 return -EOPNOTSUPP; 2740 2741 /* controlled by firmware */ 2742 if (smu->dc_controlled_by_gpio) 2743 return 0; 2744 2745 ret = smu_set_power_source(smu, 2746 smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC : 2747 SMU_POWER_SOURCE_DC); 2748 if (ret) 2749 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n", 2750 smu->adev->pm.ac_power ? 
"AC" : "DC"); 2751 2752 return ret; 2753 } 2754 2755 const struct amd_ip_funcs smu_ip_funcs = { 2756 .name = "smu", 2757 .early_init = smu_early_init, 2758 .late_init = smu_late_init, 2759 .sw_init = smu_sw_init, 2760 .sw_fini = smu_sw_fini, 2761 .hw_init = smu_hw_init, 2762 .hw_fini = smu_hw_fini, 2763 .late_fini = smu_late_fini, 2764 .suspend = smu_suspend, 2765 .resume = smu_resume, 2766 .is_idle = NULL, 2767 .check_soft_reset = NULL, 2768 .wait_for_idle = NULL, 2769 .soft_reset = NULL, 2770 .set_clockgating_state = smu_set_clockgating_state, 2771 .set_powergating_state = smu_set_powergating_state, 2772 }; 2773 2774 const struct amdgpu_ip_block_version smu_v11_0_ip_block = { 2775 .type = AMD_IP_BLOCK_TYPE_SMC, 2776 .major = 11, 2777 .minor = 0, 2778 .rev = 0, 2779 .funcs = &smu_ip_funcs, 2780 }; 2781 2782 const struct amdgpu_ip_block_version smu_v12_0_ip_block = { 2783 .type = AMD_IP_BLOCK_TYPE_SMC, 2784 .major = 12, 2785 .minor = 0, 2786 .rev = 0, 2787 .funcs = &smu_ip_funcs, 2788 }; 2789 2790 const struct amdgpu_ip_block_version smu_v13_0_ip_block = { 2791 .type = AMD_IP_BLOCK_TYPE_SMC, 2792 .major = 13, 2793 .minor = 0, 2794 .rev = 0, 2795 .funcs = &smu_ip_funcs, 2796 }; 2797 2798 const struct amdgpu_ip_block_version smu_v14_0_ip_block = { 2799 .type = AMD_IP_BLOCK_TYPE_SMC, 2800 .major = 14, 2801 .minor = 0, 2802 .rev = 0, 2803 .funcs = &smu_ip_funcs, 2804 }; 2805 2806 const struct ras_smu_drv *smu_get_ras_smu_driver(void *handle) 2807 { 2808 struct smu_context *smu = (struct smu_context *)handle; 2809 const struct ras_smu_drv *tmp = NULL; 2810 int ret; 2811 2812 ret = smu_get_ras_smu_drv(smu, &tmp); 2813 2814 return ret ? NULL : tmp; 2815 } 2816 2817 static int smu_load_microcode(void *handle) 2818 { 2819 struct smu_context *smu = handle; 2820 struct amdgpu_device *adev = smu->adev; 2821 int ret = 0; 2822 2823 if (!smu->pm_enabled) 2824 return -EOPNOTSUPP; 2825 2826 /* This should be used for non PSP loading */ 2827 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) 2828 return 0; 2829 2830 if (smu->ppt_funcs->load_microcode) { 2831 ret = smu->ppt_funcs->load_microcode(smu); 2832 if (ret) { 2833 dev_err(adev->dev, "Load microcode failed\n"); 2834 return ret; 2835 } 2836 } 2837 2838 if (smu->ppt_funcs->check_fw_status) { 2839 ret = smu->ppt_funcs->check_fw_status(smu); 2840 if (ret) { 2841 dev_err(adev->dev, "SMC is not ready\n"); 2842 return ret; 2843 } 2844 } 2845 2846 return ret; 2847 } 2848 2849 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) 2850 { 2851 int ret = 0; 2852 2853 if (smu->ppt_funcs->set_gfx_cgpg) 2854 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled); 2855 2856 return ret; 2857 } 2858 2859 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed) 2860 { 2861 struct smu_context *smu = handle; 2862 int ret = 0; 2863 2864 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2865 return -EOPNOTSUPP; 2866 2867 if (!smu->ppt_funcs->set_fan_speed_rpm) 2868 return -EOPNOTSUPP; 2869 2870 if (speed == U32_MAX) 2871 return -EINVAL; 2872 2873 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed); 2874 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 2875 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM; 2876 smu->user_dpm_profile.fan_speed_rpm = speed; 2877 2878 /* Override custom PWM setting as they cannot co-exist */ 2879 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM; 2880 smu->user_dpm_profile.fan_speed_pwm = 0; 2881 } 2882 2883 return ret; 2884 } 2885 2886 /** 2887 * smu_get_power_limit - Request one of the 
SMU Power Limits 2888 * 2889 * @handle: pointer to smu context 2890 * @limit: requested limit is written back to this variable 2891 * @pp_limit_level: &pp_power_limit_level which limit of the power to return 2892 * @pp_power_type: &pp_power_type type of power 2893 * Return: 0 on success, <0 on error 2894 * 2895 */ 2896 int smu_get_power_limit(void *handle, 2897 uint32_t *limit, 2898 enum pp_power_limit_level pp_limit_level, 2899 enum pp_power_type pp_power_type) 2900 { 2901 struct smu_context *smu = handle; 2902 struct amdgpu_device *adev = smu->adev; 2903 enum smu_ppt_limit_level limit_level; 2904 uint32_t limit_type; 2905 int ret = 0; 2906 2907 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2908 return -EOPNOTSUPP; 2909 2910 switch (pp_power_type) { 2911 case PP_PWR_TYPE_SUSTAINED: 2912 limit_type = SMU_DEFAULT_PPT_LIMIT; 2913 break; 2914 case PP_PWR_TYPE_FAST: 2915 limit_type = SMU_FAST_PPT_LIMIT; 2916 break; 2917 default: 2918 return -EOPNOTSUPP; 2919 } 2920 2921 switch (pp_limit_level) { 2922 case PP_PWR_LIMIT_CURRENT: 2923 limit_level = SMU_PPT_LIMIT_CURRENT; 2924 break; 2925 case PP_PWR_LIMIT_DEFAULT: 2926 limit_level = SMU_PPT_LIMIT_DEFAULT; 2927 break; 2928 case PP_PWR_LIMIT_MAX: 2929 limit_level = SMU_PPT_LIMIT_MAX; 2930 break; 2931 case PP_PWR_LIMIT_MIN: 2932 limit_level = SMU_PPT_LIMIT_MIN; 2933 break; 2934 default: 2935 return -EOPNOTSUPP; 2936 } 2937 2938 if (limit_type != SMU_DEFAULT_PPT_LIMIT) { 2939 if (smu->ppt_funcs->get_ppt_limit) 2940 ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level); 2941 } else { 2942 switch (limit_level) { 2943 case SMU_PPT_LIMIT_CURRENT: 2944 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 2945 case IP_VERSION(13, 0, 2): 2946 case IP_VERSION(13, 0, 6): 2947 case IP_VERSION(13, 0, 12): 2948 case IP_VERSION(13, 0, 14): 2949 case IP_VERSION(11, 0, 7): 2950 case IP_VERSION(11, 0, 11): 2951 case IP_VERSION(11, 0, 12): 2952 case IP_VERSION(11, 0, 13): 2953 ret = smu_get_asic_power_limits(smu, 2954 &smu->current_power_limit, 2955 NULL, NULL, NULL); 2956 break; 2957 default: 2958 break; 2959 } 2960 *limit = smu->current_power_limit; 2961 break; 2962 case SMU_PPT_LIMIT_DEFAULT: 2963 *limit = smu->default_power_limit; 2964 break; 2965 case SMU_PPT_LIMIT_MAX: 2966 *limit = smu->max_power_limit; 2967 break; 2968 case SMU_PPT_LIMIT_MIN: 2969 *limit = smu->min_power_limit; 2970 break; 2971 default: 2972 return -EINVAL; 2973 } 2974 } 2975 2976 return ret; 2977 } 2978 2979 static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit) 2980 { 2981 struct smu_context *smu = handle; 2982 int ret = 0; 2983 2984 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2985 return -EOPNOTSUPP; 2986 2987 if (limit_type == SMU_DEFAULT_PPT_LIMIT) { 2988 if (!limit) 2989 limit = smu->current_power_limit; 2990 if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) { 2991 dev_err(smu->adev->dev, 2992 "New power limit (%d) is out of range [%d,%d]\n", 2993 limit, smu->min_power_limit, smu->max_power_limit); 2994 return -EINVAL; 2995 } 2996 } 2997 2998 if (smu->ppt_funcs->set_power_limit) { 2999 ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit); 3000 if (ret) 3001 return ret; 3002 if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) 3003 smu->user_dpm_profile.power_limits[limit_type] = limit; 3004 } 3005 3006 return 0; 3007 } 3008 3009 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) 3010 { 3011 int ret = 0; 3012 3013 if (!smu->pm_enabled || 
!smu->adev->pm.dpm_enabled) 3014 return -EOPNOTSUPP; 3015 3016 if (smu->ppt_funcs->print_clk_levels) 3017 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf); 3018 3019 return ret; 3020 } 3021 3022 static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type) 3023 { 3024 enum smu_clk_type clk_type; 3025 3026 switch (type) { 3027 case PP_SCLK: 3028 clk_type = SMU_SCLK; break; 3029 case PP_MCLK: 3030 clk_type = SMU_MCLK; break; 3031 case PP_PCIE: 3032 clk_type = SMU_PCIE; break; 3033 case PP_SOCCLK: 3034 clk_type = SMU_SOCCLK; break; 3035 case PP_FCLK: 3036 clk_type = SMU_FCLK; break; 3037 case PP_DCEFCLK: 3038 clk_type = SMU_DCEFCLK; break; 3039 case PP_VCLK: 3040 clk_type = SMU_VCLK; break; 3041 case PP_VCLK1: 3042 clk_type = SMU_VCLK1; break; 3043 case PP_DCLK: 3044 clk_type = SMU_DCLK; break; 3045 case PP_DCLK1: 3046 clk_type = SMU_DCLK1; break; 3047 case PP_ISPICLK: 3048 clk_type = SMU_ISPICLK; 3049 break; 3050 case PP_ISPXCLK: 3051 clk_type = SMU_ISPXCLK; 3052 break; 3053 case OD_SCLK: 3054 clk_type = SMU_OD_SCLK; break; 3055 case OD_MCLK: 3056 clk_type = SMU_OD_MCLK; break; 3057 case OD_VDDC_CURVE: 3058 clk_type = SMU_OD_VDDC_CURVE; break; 3059 case OD_RANGE: 3060 clk_type = SMU_OD_RANGE; break; 3061 case OD_VDDGFX_OFFSET: 3062 clk_type = SMU_OD_VDDGFX_OFFSET; break; 3063 case OD_CCLK: 3064 clk_type = SMU_OD_CCLK; break; 3065 case OD_FAN_CURVE: 3066 clk_type = SMU_OD_FAN_CURVE; break; 3067 case OD_ACOUSTIC_LIMIT: 3068 clk_type = SMU_OD_ACOUSTIC_LIMIT; break; 3069 case OD_ACOUSTIC_TARGET: 3070 clk_type = SMU_OD_ACOUSTIC_TARGET; break; 3071 case OD_FAN_TARGET_TEMPERATURE: 3072 clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break; 3073 case OD_FAN_MINIMUM_PWM: 3074 clk_type = SMU_OD_FAN_MINIMUM_PWM; break; 3075 case OD_FAN_ZERO_RPM_ENABLE: 3076 clk_type = SMU_OD_FAN_ZERO_RPM_ENABLE; break; 3077 case OD_FAN_ZERO_RPM_STOP_TEMP: 3078 clk_type = SMU_OD_FAN_ZERO_RPM_STOP_TEMP; break; 3079 default: 3080 clk_type = SMU_CLK_COUNT; break; 3081 } 3082 3083 return clk_type; 3084 } 3085 3086 static int smu_print_ppclk_levels(void *handle, 3087 enum pp_clock_type type, 3088 char *buf) 3089 { 3090 struct smu_context *smu = handle; 3091 enum smu_clk_type clk_type; 3092 3093 clk_type = smu_convert_to_smuclk(type); 3094 if (clk_type == SMU_CLK_COUNT) 3095 return -EINVAL; 3096 3097 return smu_print_smuclk_levels(smu, clk_type, buf); 3098 } 3099 3100 static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset) 3101 { 3102 struct smu_context *smu = handle; 3103 enum smu_clk_type clk_type; 3104 3105 clk_type = smu_convert_to_smuclk(type); 3106 if (clk_type == SMU_CLK_COUNT) 3107 return -EINVAL; 3108 3109 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3110 return -EOPNOTSUPP; 3111 3112 if (!smu->ppt_funcs->emit_clk_levels) 3113 return -ENOENT; 3114 3115 return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset); 3116 3117 } 3118 3119 static int smu_od_edit_dpm_table(void *handle, 3120 enum PP_OD_DPM_TABLE_COMMAND type, 3121 long *input, uint32_t size) 3122 { 3123 struct smu_context *smu = handle; 3124 int ret = 0; 3125 3126 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3127 return -EOPNOTSUPP; 3128 3129 if (smu->ppt_funcs->od_edit_dpm_table) { 3130 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size); 3131 } 3132 3133 return ret; 3134 } 3135 3136 static int smu_read_sensor(void *handle, 3137 int sensor, 3138 void *data, 3139 int *size_arg) 3140 { 3141 struct smu_context *smu = handle; 3142 struct amdgpu_device *adev = smu->adev; 3143 
struct smu_umd_pstate_table *pstate_table = 3144 &smu->pstate_table; 3145 int i, ret = 0; 3146 uint32_t *size, size_val; 3147 3148 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3149 return -EOPNOTSUPP; 3150 3151 if (!data || !size_arg) 3152 return -EINVAL; 3153 3154 size_val = *size_arg; 3155 size = &size_val; 3156 3157 if (smu->ppt_funcs->read_sensor) 3158 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size)) 3159 goto unlock; 3160 3161 switch (sensor) { 3162 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK: 3163 *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100; 3164 *size = 4; 3165 break; 3166 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK: 3167 *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100; 3168 *size = 4; 3169 break; 3170 case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK: 3171 *((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100; 3172 *size = 4; 3173 break; 3174 case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK: 3175 *((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100; 3176 *size = 4; 3177 break; 3178 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: 3179 ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data); 3180 *size = 8; 3181 break; 3182 case AMDGPU_PP_SENSOR_UVD_POWER: 3183 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0; 3184 *size = 4; 3185 break; 3186 case AMDGPU_PP_SENSOR_VCE_POWER: 3187 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0; 3188 *size = 4; 3189 break; 3190 case AMDGPU_PP_SENSOR_VCN_POWER_STATE: 3191 *(uint32_t *)data = 0; 3192 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 3193 if (!atomic_read(&smu->smu_power.power_gate.vcn_gated[i])) { 3194 *(uint32_t *)data = 1; 3195 break; 3196 } 3197 } 3198 *size = 4; 3199 break; 3200 case AMDGPU_PP_SENSOR_MIN_FAN_RPM: 3201 *(uint32_t *)data = 0; 3202 *size = 4; 3203 break; 3204 default: 3205 *size = 0; 3206 ret = -EOPNOTSUPP; 3207 break; 3208 } 3209 3210 unlock: 3211 // assign uint32_t to int 3212 *size_arg = size_val; 3213 3214 return ret; 3215 } 3216 3217 static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit) 3218 { 3219 int ret = -EOPNOTSUPP; 3220 struct smu_context *smu = handle; 3221 3222 if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit) 3223 ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit); 3224 3225 return ret; 3226 } 3227 3228 static int smu_set_apu_thermal_limit(void *handle, uint32_t limit) 3229 { 3230 int ret = -EOPNOTSUPP; 3231 struct smu_context *smu = handle; 3232 3233 if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit) 3234 ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit); 3235 3236 return ret; 3237 } 3238 3239 static int smu_get_power_profile_mode(void *handle, char *buf) 3240 { 3241 struct smu_context *smu = handle; 3242 3243 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || 3244 !smu->ppt_funcs->get_power_profile_mode) 3245 return -EOPNOTSUPP; 3246 if (!buf) 3247 return -EINVAL; 3248 3249 return smu->ppt_funcs->get_power_profile_mode(smu, buf); 3250 } 3251 3252 static int smu_set_power_profile_mode(void *handle, 3253 long *param, 3254 uint32_t param_size) 3255 { 3256 struct smu_context *smu = handle; 3257 bool custom = false; 3258 int ret = 0; 3259 3260 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || 3261 !smu->ppt_funcs->set_power_profile_mode) 3262 return -EOPNOTSUPP; 3263 3264 if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) { 3265 custom = true; 3266 /* clear frontend mask so custom changes propogate */ 3267 smu->workload_mask = 0; 3268 } 3269 3270 if 
((param[param_size] != smu->power_profile_mode) || custom) { 3271 /* clear the old user preference */ 3272 smu_power_profile_mode_put(smu, smu->power_profile_mode); 3273 /* set the new user preference */ 3274 smu_power_profile_mode_get(smu, param[param_size]); 3275 ret = smu_bump_power_profile_mode(smu, 3276 custom ? param : NULL, 3277 custom ? param_size : 0); 3278 if (ret) 3279 smu_power_profile_mode_put(smu, param[param_size]); 3280 else 3281 /* store the user's preference */ 3282 smu->power_profile_mode = param[param_size]; 3283 } 3284 3285 return ret; 3286 } 3287 3288 static int smu_get_fan_control_mode(void *handle, u32 *fan_mode) 3289 { 3290 struct smu_context *smu = handle; 3291 3292 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3293 return -EOPNOTSUPP; 3294 3295 if (!smu->ppt_funcs->get_fan_control_mode) 3296 return -EOPNOTSUPP; 3297 3298 if (!fan_mode) 3299 return -EINVAL; 3300 3301 *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu); 3302 3303 return 0; 3304 } 3305 3306 static int smu_set_fan_control_mode(void *handle, u32 value) 3307 { 3308 struct smu_context *smu = handle; 3309 int ret = 0; 3310 3311 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3312 return -EOPNOTSUPP; 3313 3314 if (!smu->ppt_funcs->set_fan_control_mode) 3315 return -EOPNOTSUPP; 3316 3317 if (value == U32_MAX) 3318 return -EINVAL; 3319 3320 ret = smu->ppt_funcs->set_fan_control_mode(smu, value); 3321 if (ret) 3322 goto out; 3323 3324 if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 3325 smu->user_dpm_profile.fan_mode = value; 3326 3327 /* reset user dpm fan speed */ 3328 if (value != AMD_FAN_CTRL_MANUAL) { 3329 smu->user_dpm_profile.fan_speed_pwm = 0; 3330 smu->user_dpm_profile.fan_speed_rpm = 0; 3331 smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM); 3332 } 3333 } 3334 3335 out: 3336 return ret; 3337 } 3338 3339 static int smu_get_fan_speed_pwm(void *handle, u32 *speed) 3340 { 3341 struct smu_context *smu = handle; 3342 int ret = 0; 3343 3344 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3345 return -EOPNOTSUPP; 3346 3347 if (!smu->ppt_funcs->get_fan_speed_pwm) 3348 return -EOPNOTSUPP; 3349 3350 if (!speed) 3351 return -EINVAL; 3352 3353 ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed); 3354 3355 return ret; 3356 } 3357 3358 static int smu_set_fan_speed_pwm(void *handle, u32 speed) 3359 { 3360 struct smu_context *smu = handle; 3361 int ret = 0; 3362 3363 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3364 return -EOPNOTSUPP; 3365 3366 if (!smu->ppt_funcs->set_fan_speed_pwm) 3367 return -EOPNOTSUPP; 3368 3369 if (speed == U32_MAX) 3370 return -EINVAL; 3371 3372 ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed); 3373 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 3374 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM; 3375 smu->user_dpm_profile.fan_speed_pwm = speed; 3376 3377 /* Override custom RPM setting as they cannot co-exist */ 3378 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM; 3379 smu->user_dpm_profile.fan_speed_rpm = 0; 3380 } 3381 3382 return ret; 3383 } 3384 3385 static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed) 3386 { 3387 struct smu_context *smu = handle; 3388 int ret = 0; 3389 3390 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3391 return -EOPNOTSUPP; 3392 3393 if (!smu->ppt_funcs->get_fan_speed_rpm) 3394 return -EOPNOTSUPP; 3395 3396 if (!speed) 3397 return -EINVAL; 3398 3399 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed); 3400 3401 return 
ret; 3402 } 3403 3404 static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk) 3405 { 3406 struct smu_context *smu = handle; 3407 3408 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3409 return -EOPNOTSUPP; 3410 3411 return smu_set_min_dcef_deep_sleep(smu, clk); 3412 } 3413 3414 static int smu_get_clock_by_type_with_latency(void *handle, 3415 enum amd_pp_clock_type type, 3416 struct pp_clock_levels_with_latency *clocks) 3417 { 3418 struct smu_context *smu = handle; 3419 enum smu_clk_type clk_type; 3420 int ret = 0; 3421 3422 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3423 return -EOPNOTSUPP; 3424 3425 if (smu->ppt_funcs->get_clock_by_type_with_latency) { 3426 switch (type) { 3427 case amd_pp_sys_clock: 3428 clk_type = SMU_GFXCLK; 3429 break; 3430 case amd_pp_mem_clock: 3431 clk_type = SMU_MCLK; 3432 break; 3433 case amd_pp_dcef_clock: 3434 clk_type = SMU_DCEFCLK; 3435 break; 3436 case amd_pp_disp_clock: 3437 clk_type = SMU_DISPCLK; 3438 break; 3439 default: 3440 dev_err(smu->adev->dev, "Invalid clock type!\n"); 3441 return -EINVAL; 3442 } 3443 3444 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks); 3445 } 3446 3447 return ret; 3448 } 3449 3450 static int smu_display_clock_voltage_request(void *handle, 3451 struct pp_display_clock_request *clock_req) 3452 { 3453 struct smu_context *smu = handle; 3454 int ret = 0; 3455 3456 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3457 return -EOPNOTSUPP; 3458 3459 if (smu->ppt_funcs->display_clock_voltage_request) 3460 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req); 3461 3462 return ret; 3463 } 3464 3465 3466 static int smu_display_disable_memory_clock_switch(void *handle, 3467 bool disable_memory_clock_switch) 3468 { 3469 struct smu_context *smu = handle; 3470 int ret = -EINVAL; 3471 3472 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3473 return -EOPNOTSUPP; 3474 3475 if (smu->ppt_funcs->display_disable_memory_clock_switch) 3476 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch); 3477 3478 return ret; 3479 } 3480 3481 static int smu_set_xgmi_pstate(void *handle, 3482 uint32_t pstate) 3483 { 3484 struct smu_context *smu = handle; 3485 int ret = 0; 3486 3487 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3488 return -EOPNOTSUPP; 3489 3490 if (smu->ppt_funcs->set_xgmi_pstate) 3491 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate); 3492 3493 if (ret) 3494 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n"); 3495 3496 return ret; 3497 } 3498 3499 static int smu_get_baco_capability(void *handle) 3500 { 3501 struct smu_context *smu = handle; 3502 3503 if (!smu->pm_enabled) 3504 return false; 3505 3506 if (!smu->ppt_funcs || !smu->ppt_funcs->get_bamaco_support) 3507 return false; 3508 3509 return smu->ppt_funcs->get_bamaco_support(smu); 3510 } 3511 3512 static int smu_baco_set_state(void *handle, int state) 3513 { 3514 struct smu_context *smu = handle; 3515 int ret = 0; 3516 3517 if (!smu->pm_enabled) 3518 return -EOPNOTSUPP; 3519 3520 if (state == 0) { 3521 if (smu->ppt_funcs->baco_exit) 3522 ret = smu->ppt_funcs->baco_exit(smu); 3523 } else if (state == 1) { 3524 if (smu->ppt_funcs->baco_enter) 3525 ret = smu->ppt_funcs->baco_enter(smu); 3526 } else { 3527 return -EINVAL; 3528 } 3529 3530 if (ret) 3531 dev_err(smu->adev->dev, "Failed to %s BACO state!\n", 3532 (state)?"enter":"exit"); 3533 3534 return ret; 3535 } 3536 3537 bool smu_mode1_reset_is_support(struct smu_context *smu) 3538 { 3539 bool ret = false; 3540 3541 if 
(!smu->pm_enabled) 3542 return false; 3543 3544 if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support) 3545 ret = smu->ppt_funcs->mode1_reset_is_support(smu); 3546 3547 return ret; 3548 } 3549 3550 bool smu_link_reset_is_support(struct smu_context *smu) 3551 { 3552 if (!smu->pm_enabled) 3553 return false; 3554 3555 return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__LINK_RESET); 3556 } 3557 3558 int smu_mode1_reset(struct smu_context *smu) 3559 { 3560 int ret = 0; 3561 3562 if (!smu->pm_enabled) 3563 return -EOPNOTSUPP; 3564 3565 if (smu->ppt_funcs->mode1_reset) 3566 ret = smu->ppt_funcs->mode1_reset(smu); 3567 3568 return ret; 3569 } 3570 3571 static int smu_mode2_reset(void *handle) 3572 { 3573 struct smu_context *smu = handle; 3574 int ret = 0; 3575 3576 if (!smu->pm_enabled) 3577 return -EOPNOTSUPP; 3578 3579 if (smu->ppt_funcs->mode2_reset) 3580 ret = smu->ppt_funcs->mode2_reset(smu); 3581 3582 if (ret) 3583 dev_err(smu->adev->dev, "Mode2 reset failed!\n"); 3584 3585 return ret; 3586 } 3587 3588 int smu_link_reset(struct smu_context *smu) 3589 { 3590 int ret = 0; 3591 3592 if (!smu->pm_enabled) 3593 return -EOPNOTSUPP; 3594 3595 if (smu->ppt_funcs->link_reset) 3596 ret = smu->ppt_funcs->link_reset(smu); 3597 3598 return ret; 3599 } 3600 3601 static int smu_enable_gfx_features(void *handle) 3602 { 3603 struct smu_context *smu = handle; 3604 int ret = 0; 3605 3606 if (!smu->pm_enabled) 3607 return -EOPNOTSUPP; 3608 3609 if (smu->ppt_funcs->enable_gfx_features) 3610 ret = smu->ppt_funcs->enable_gfx_features(smu); 3611 3612 if (ret) 3613 dev_err(smu->adev->dev, "enable gfx features failed!\n"); 3614 3615 return ret; 3616 } 3617 3618 static int smu_get_max_sustainable_clocks_by_dc(void *handle, 3619 struct pp_smu_nv_clock_table *max_clocks) 3620 { 3621 struct smu_context *smu = handle; 3622 int ret = 0; 3623 3624 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3625 return -EOPNOTSUPP; 3626 3627 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc) 3628 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks); 3629 3630 return ret; 3631 } 3632 3633 static int smu_get_uclk_dpm_states(void *handle, 3634 unsigned int *clock_values_in_khz, 3635 unsigned int *num_states) 3636 { 3637 struct smu_context *smu = handle; 3638 int ret = 0; 3639 3640 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3641 return -EOPNOTSUPP; 3642 3643 if (smu->ppt_funcs->get_uclk_dpm_states) 3644 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states); 3645 3646 return ret; 3647 } 3648 3649 static enum amd_pm_state_type smu_get_current_power_state(void *handle) 3650 { 3651 struct smu_context *smu = handle; 3652 enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT; 3653 3654 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3655 return -EOPNOTSUPP; 3656 3657 if (smu->ppt_funcs->get_current_power_state) 3658 pm_state = smu->ppt_funcs->get_current_power_state(smu); 3659 3660 return pm_state; 3661 } 3662 3663 static int smu_get_dpm_clock_table(void *handle, 3664 struct dpm_clocks *clock_table) 3665 { 3666 struct smu_context *smu = handle; 3667 int ret = 0; 3668 3669 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3670 return -EOPNOTSUPP; 3671 3672 if (smu->ppt_funcs->get_dpm_clock_table) 3673 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table); 3674 3675 return ret; 3676 } 3677 3678 static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table) 3679 { 3680 struct smu_context *smu = handle; 3681 3682 if (!smu->pm_enabled || 
!smu->adev->pm.dpm_enabled) 3683 return -EOPNOTSUPP; 3684 3685 if (!smu->ppt_funcs->get_gpu_metrics) 3686 return -EOPNOTSUPP; 3687 3688 return smu->ppt_funcs->get_gpu_metrics(smu, table); 3689 } 3690 3691 static ssize_t smu_sys_get_pm_metrics(void *handle, void *pm_metrics, 3692 size_t size) 3693 { 3694 struct smu_context *smu = handle; 3695 3696 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3697 return -EOPNOTSUPP; 3698 3699 if (!smu->ppt_funcs->get_pm_metrics) 3700 return -EOPNOTSUPP; 3701 3702 return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size); 3703 } 3704 3705 static int smu_enable_mgpu_fan_boost(void *handle) 3706 { 3707 struct smu_context *smu = handle; 3708 int ret = 0; 3709 3710 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3711 return -EOPNOTSUPP; 3712 3713 if (smu->ppt_funcs->enable_mgpu_fan_boost) 3714 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu); 3715 3716 return ret; 3717 } 3718 3719 static int smu_gfx_state_change_set(void *handle, 3720 uint32_t state) 3721 { 3722 struct smu_context *smu = handle; 3723 int ret = 0; 3724 3725 if (smu->ppt_funcs->gfx_state_change_set) 3726 ret = smu->ppt_funcs->gfx_state_change_set(smu, state); 3727 3728 return ret; 3729 } 3730 3731 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable) 3732 { 3733 int ret = 0; 3734 3735 if (smu->ppt_funcs->smu_handle_passthrough_sbr) 3736 ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable); 3737 3738 return ret; 3739 } 3740 3741 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc) 3742 { 3743 int ret = -EOPNOTSUPP; 3744 3745 if (smu->ppt_funcs && 3746 smu->ppt_funcs->get_ecc_info) 3747 ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc); 3748 3749 return ret; 3750 3751 } 3752 3753 static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size) 3754 { 3755 struct smu_context *smu = handle; 3756 struct smu_table_context *smu_table = &smu->smu_table; 3757 struct smu_table *memory_pool = &smu_table->memory_pool; 3758 3759 if (!addr || !size) 3760 return -EINVAL; 3761 3762 *addr = NULL; 3763 *size = 0; 3764 if (memory_pool->bo) { 3765 *addr = memory_pool->cpu_addr; 3766 *size = memory_pool->size; 3767 } 3768 3769 return 0; 3770 } 3771 3772 static void smu_print_dpm_policy(struct smu_dpm_policy *policy, char *sysbuf, 3773 size_t *size) 3774 { 3775 size_t offset = *size; 3776 int level; 3777 3778 for_each_set_bit(level, &policy->level_mask, PP_POLICY_MAX_LEVELS) { 3779 if (level == policy->current_level) 3780 offset += sysfs_emit_at(sysbuf, offset, 3781 "%d : %s*\n", level, 3782 policy->desc->get_desc(policy, level)); 3783 else 3784 offset += sysfs_emit_at(sysbuf, offset, 3785 "%d : %s\n", level, 3786 policy->desc->get_desc(policy, level)); 3787 } 3788 3789 *size = offset; 3790 } 3791 3792 ssize_t smu_get_pm_policy_info(struct smu_context *smu, 3793 enum pp_pm_policy p_type, char *sysbuf) 3794 { 3795 struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm; 3796 struct smu_dpm_policy_ctxt *policy_ctxt; 3797 struct smu_dpm_policy *dpm_policy; 3798 size_t offset = 0; 3799 3800 policy_ctxt = dpm_ctxt->dpm_policies; 3801 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt || 3802 !policy_ctxt->policy_mask) 3803 return -EOPNOTSUPP; 3804 3805 if (p_type == PP_PM_POLICY_NONE) 3806 return -EINVAL; 3807 3808 dpm_policy = smu_get_pm_policy(smu, p_type); 3809 if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->desc) 3810 return -ENOENT; 3811 3812 if (!sysbuf) 3813 return -EINVAL; 3814 3815 smu_print_dpm_policy(dpm_policy, sysbuf, &offset); 3816 
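/* offset now holds the number of bytes emitted into sysbuf */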
3817 return offset; 3818 } 3819 3820 struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu, 3821 enum pp_pm_policy p_type) 3822 { 3823 struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm; 3824 struct smu_dpm_policy_ctxt *policy_ctxt; 3825 int i; 3826 3827 policy_ctxt = dpm_ctxt->dpm_policies; 3828 if (!policy_ctxt) 3829 return NULL; 3830 3831 for (i = 0; i < hweight32(policy_ctxt->policy_mask); ++i) { 3832 if (policy_ctxt->policies[i].policy_type == p_type) 3833 return &policy_ctxt->policies[i]; 3834 } 3835 3836 return NULL; 3837 } 3838 3839 int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type, 3840 int level) 3841 { 3842 struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm; 3843 struct smu_dpm_policy *dpm_policy = NULL; 3844 struct smu_dpm_policy_ctxt *policy_ctxt; 3845 int ret = -EOPNOTSUPP; 3846 3847 policy_ctxt = dpm_ctxt->dpm_policies; 3848 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt || 3849 !policy_ctxt->policy_mask) 3850 return ret; 3851 3852 if (level < 0 || level >= PP_POLICY_MAX_LEVELS) 3853 return -EINVAL; 3854 3855 dpm_policy = smu_get_pm_policy(smu, p_type); 3856 3857 if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->set_policy) 3858 return ret; 3859 3860 if (dpm_policy->current_level == level) 3861 return 0; 3862 3863 ret = dpm_policy->set_policy(smu, level); 3864 3865 if (!ret) 3866 dpm_policy->current_level = level; 3867 3868 return ret; 3869 } 3870 3871 static ssize_t smu_sys_get_temp_metrics(void *handle, enum smu_temp_metric_type type, void *table) 3872 { 3873 struct smu_context *smu = handle; 3874 struct smu_table_context *smu_table = &smu->smu_table; 3875 struct smu_table *tables = smu_table->tables; 3876 enum smu_table_id table_id; 3877 3878 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3879 return -EOPNOTSUPP; 3880 3881 if (!smu->smu_temp.temp_funcs || !smu->smu_temp.temp_funcs->get_temp_metrics) 3882 return -EOPNOTSUPP; 3883 3884 table_id = smu_metrics_get_temp_table_id(type); 3885 3886 if (table_id == SMU_TABLE_COUNT) 3887 return -EINVAL; 3888 3889 /* If the request is to get size alone, return the cached table size */ 3890 if (!table && tables[table_id].cache.size) 3891 return tables[table_id].cache.size; 3892 3893 if (smu_table_cache_is_valid(&tables[table_id])) { 3894 memcpy(table, tables[table_id].cache.buffer, 3895 tables[table_id].cache.size); 3896 return tables[table_id].cache.size; 3897 } 3898 3899 return smu->smu_temp.temp_funcs->get_temp_metrics(smu, type, table); 3900 } 3901 3902 static bool smu_temp_metrics_is_supported(void *handle, enum smu_temp_metric_type type) 3903 { 3904 struct smu_context *smu = handle; 3905 bool ret = false; 3906 3907 if (!smu->pm_enabled) 3908 return false; 3909 3910 if (smu->smu_temp.temp_funcs && smu->smu_temp.temp_funcs->temp_metrics_is_supported) 3911 ret = smu->smu_temp.temp_funcs->temp_metrics_is_supported(smu, type); 3912 3913 return ret; 3914 } 3915 3916 static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table) 3917 { 3918 struct smu_context *smu = handle; 3919 3920 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3921 return -EOPNOTSUPP; 3922 3923 if (!smu->adev->xcp_mgr || !smu->ppt_funcs->get_xcp_metrics) 3924 return -EOPNOTSUPP; 3925 3926 return smu->ppt_funcs->get_xcp_metrics(smu, xcp_id, table); 3927 } 3928 3929 static const struct amd_pm_funcs swsmu_pm_funcs = { 3930 /* export for sysfs */ 3931 .set_fan_control_mode = smu_set_fan_control_mode, 3932 .get_fan_control_mode = smu_get_fan_control_mode, 3933 .set_fan_speed_pwm = 
smu_set_fan_speed_pwm, 3934 .get_fan_speed_pwm = smu_get_fan_speed_pwm, 3935 .force_clock_level = smu_force_ppclk_levels, 3936 .print_clock_levels = smu_print_ppclk_levels, 3937 .emit_clock_levels = smu_emit_ppclk_levels, 3938 .force_performance_level = smu_force_performance_level, 3939 .read_sensor = smu_read_sensor, 3940 .get_apu_thermal_limit = smu_get_apu_thermal_limit, 3941 .set_apu_thermal_limit = smu_set_apu_thermal_limit, 3942 .get_performance_level = smu_get_performance_level, 3943 .get_current_power_state = smu_get_current_power_state, 3944 .get_fan_speed_rpm = smu_get_fan_speed_rpm, 3945 .set_fan_speed_rpm = smu_set_fan_speed_rpm, 3946 .get_pp_num_states = smu_get_power_num_states, 3947 .get_pp_table = smu_sys_get_pp_table, 3948 .set_pp_table = smu_sys_set_pp_table, 3949 .switch_power_profile = smu_switch_power_profile, 3950 .pause_power_profile = smu_pause_power_profile, 3951 /* export to amdgpu */ 3952 .dispatch_tasks = smu_handle_dpm_task, 3953 .load_firmware = smu_load_microcode, 3954 .set_powergating_by_smu = smu_dpm_set_power_gate, 3955 .set_power_limit = smu_set_power_limit, 3956 .get_power_limit = smu_get_power_limit, 3957 .get_power_profile_mode = smu_get_power_profile_mode, 3958 .set_power_profile_mode = smu_set_power_profile_mode, 3959 .odn_edit_dpm_table = smu_od_edit_dpm_table, 3960 .set_mp1_state = smu_set_mp1_state, 3961 .gfx_state_change_set = smu_gfx_state_change_set, 3962 /* export to DC */ 3963 .get_sclk = smu_get_sclk, 3964 .get_mclk = smu_get_mclk, 3965 .display_configuration_change = smu_display_configuration_change, 3966 .get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency, 3967 .display_clock_voltage_request = smu_display_clock_voltage_request, 3968 .enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost, 3969 .set_active_display_count = smu_set_display_count, 3970 .set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk, 3971 .get_asic_baco_capability = smu_get_baco_capability, 3972 .set_asic_baco_state = smu_baco_set_state, 3973 .get_ppfeature_status = smu_sys_get_pp_feature_mask, 3974 .set_ppfeature_status = smu_sys_set_pp_feature_mask, 3975 .asic_reset_mode_2 = smu_mode2_reset, 3976 .asic_reset_enable_gfx_features = smu_enable_gfx_features, 3977 .set_df_cstate = smu_set_df_cstate, 3978 .set_xgmi_pstate = smu_set_xgmi_pstate, 3979 .get_gpu_metrics = smu_sys_get_gpu_metrics, 3980 .get_pm_metrics = smu_sys_get_pm_metrics, 3981 .set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges, 3982 .display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch, 3983 .get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc, 3984 .get_uclk_dpm_states = smu_get_uclk_dpm_states, 3985 .get_dpm_clock_table = smu_get_dpm_clock_table, 3986 .get_smu_prv_buf_details = smu_get_prv_buffer_details, 3987 .get_xcp_metrics = smu_sys_get_xcp_metrics, 3988 .get_temp_metrics = smu_sys_get_temp_metrics, 3989 .temp_metrics_is_supported = smu_temp_metrics_is_supported, 3990 }; 3991 3992 int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event, 3993 uint64_t event_arg) 3994 { 3995 int ret = -EINVAL; 3996 3997 if (smu->ppt_funcs->wait_for_event) 3998 ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg); 3999 4000 return ret; 4001 } 4002 4003 int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size) 4004 { 4005 4006 if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled) 4007 return -EOPNOTSUPP; 4008 4009 /* Confirm the buffer allocated is of correct size */ 4010 if (size != 
smu->stb_context.stb_buf_size) 4011 return -EINVAL; 4012 4013 /* 4014 * No need to lock smu mutex as we access STB directly through MMIO 4015 * and not going through SMU messaging route (for now at least). 4016 * For registers access rely on implementation internal locking. 4017 */ 4018 return smu->ppt_funcs->stb_collect_info(smu, buf, size); 4019 } 4020 4021 #if defined(CONFIG_DEBUG_FS) 4022 4023 static int smu_stb_debugfs_open(struct inode *inode, struct file *filp) 4024 { 4025 struct amdgpu_device *adev = filp->f_inode->i_private; 4026 struct smu_context *smu = adev->powerplay.pp_handle; 4027 unsigned char *buf; 4028 int r; 4029 4030 buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL); 4031 if (!buf) 4032 return -ENOMEM; 4033 4034 r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size); 4035 if (r) 4036 goto out; 4037 4038 filp->private_data = buf; 4039 4040 return 0; 4041 4042 out: 4043 kvfree(buf); 4044 return r; 4045 } 4046 4047 static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size, 4048 loff_t *pos) 4049 { 4050 struct amdgpu_device *adev = filp->f_inode->i_private; 4051 struct smu_context *smu = adev->powerplay.pp_handle; 4052 4053 4054 if (!filp->private_data) 4055 return -EINVAL; 4056 4057 return simple_read_from_buffer(buf, 4058 size, 4059 pos, filp->private_data, 4060 smu->stb_context.stb_buf_size); 4061 } 4062 4063 static int smu_stb_debugfs_release(struct inode *inode, struct file *filp) 4064 { 4065 kvfree(filp->private_data); 4066 filp->private_data = NULL; 4067 4068 return 0; 4069 } 4070 4071 /* 4072 * We have to define not only read method but also 4073 * open and release because .read takes up to PAGE_SIZE 4074 * data each time so and so is invoked multiple times. 4075 * We allocate the STB buffer in .open and release it 4076 * in .release 4077 */ 4078 static const struct file_operations smu_stb_debugfs_fops = { 4079 .owner = THIS_MODULE, 4080 .open = smu_stb_debugfs_open, 4081 .read = smu_stb_debugfs_read, 4082 .release = smu_stb_debugfs_release, 4083 .llseek = default_llseek, 4084 }; 4085 4086 #endif 4087 4088 void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev) 4089 { 4090 #if defined(CONFIG_DEBUG_FS) 4091 4092 struct smu_context *smu = adev->powerplay.pp_handle; 4093 4094 if (!smu || (!smu->stb_context.stb_buf_size)) 4095 return; 4096 4097 debugfs_create_file_size("amdgpu_smu_stb_dump", 4098 S_IRUSR, 4099 adev_to_drm(adev)->primary->debugfs_root, 4100 adev, 4101 &smu_stb_debugfs_fops, 4102 smu->stb_context.stb_buf_size); 4103 #endif 4104 } 4105 4106 int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size) 4107 { 4108 int ret = 0; 4109 4110 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num) 4111 ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size); 4112 4113 return ret; 4114 } 4115 4116 int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size) 4117 { 4118 int ret = 0; 4119 4120 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag) 4121 ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size); 4122 4123 return ret; 4124 } 4125 4126 int smu_send_rma_reason(struct smu_context *smu) 4127 { 4128 int ret = 0; 4129 4130 if (smu->ppt_funcs && smu->ppt_funcs->send_rma_reason) 4131 ret = smu->ppt_funcs->send_rma_reason(smu); 4132 4133 return ret; 4134 } 4135 4136 /** 4137 * smu_reset_sdma_is_supported - Check if SDMA reset is supported by SMU 4138 * @smu: smu_context pointer 4139 * 4140 * This function checks if the SMU supports resetting the SDMA 
engine. 4141 * It returns true if supported, false otherwise. 4142 */ 4143 bool smu_reset_sdma_is_supported(struct smu_context *smu) 4144 { 4145 return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__SDMA_RESET); 4146 } 4147 4148 /* Reset the SDMA instances selected by inst_mask via the ASIC-specific handler, if one is implemented. */ int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask) 4149 { 4150 int ret = 0; 4151 4152 if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma) 4153 ret = smu->ppt_funcs->reset_sdma(smu, inst_mask); 4154 4155 return ret; 4156 } 4157 4158 /* Check whether the SMU supports per-instance VCN reset. */ bool smu_reset_vcn_is_supported(struct smu_context *smu) 4159 { 4160 return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__VCN_RESET); 4161 } 4162 4163 /* Reset the VCN instances selected by inst_mask via the ASIC-specific handler, if one is implemented. */ int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask) 4164 { 4165 if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn) 4166 smu->ppt_funcs->dpm_reset_vcn(smu, inst_mask); 4167 4168 return 0; 4169 } 4170