/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/reboot.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "vangogh_ppt.h"
#include "aldebaran_ppt.h"
#include "yellow_carp_ppt.h"
#include "cyan_skillfish_ppt.h"
#include "smu_v13_0_0_ppt.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_5_ppt.h"
#include "smu_v13_0_6_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
52 */ 53 #undef pr_err 54 #undef pr_warn 55 #undef pr_info 56 #undef pr_debug 57 58 static const struct amd_pm_funcs swsmu_pm_funcs; 59 static int smu_force_smuclk_levels(struct smu_context *smu, 60 enum smu_clk_type clk_type, 61 uint32_t mask); 62 static int smu_handle_task(struct smu_context *smu, 63 enum amd_dpm_forced_level level, 64 enum amd_pp_task task_id); 65 static int smu_reset(struct smu_context *smu); 66 static int smu_set_fan_speed_pwm(void *handle, u32 speed); 67 static int smu_set_fan_control_mode(void *handle, u32 value); 68 static int smu_set_power_limit(void *handle, uint32_t limit); 69 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed); 70 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled); 71 static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state); 72 73 static int smu_sys_get_pp_feature_mask(void *handle, 74 char *buf) 75 { 76 struct smu_context *smu = handle; 77 78 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 79 return -EOPNOTSUPP; 80 81 return smu_get_pp_feature_mask(smu, buf); 82 } 83 84 static int smu_sys_set_pp_feature_mask(void *handle, 85 uint64_t new_mask) 86 { 87 struct smu_context *smu = handle; 88 89 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 90 return -EOPNOTSUPP; 91 92 return smu_set_pp_feature_mask(smu, new_mask); 93 } 94 95 int smu_set_residency_gfxoff(struct smu_context *smu, bool value) 96 { 97 if (!smu->ppt_funcs->set_gfx_off_residency) 98 return -EINVAL; 99 100 return smu_set_gfx_off_residency(smu, value); 101 } 102 103 int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value) 104 { 105 if (!smu->ppt_funcs->get_gfx_off_residency) 106 return -EINVAL; 107 108 return smu_get_gfx_off_residency(smu, value); 109 } 110 111 int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value) 112 { 113 if (!smu->ppt_funcs->get_gfx_off_entrycount) 114 return -EINVAL; 115 116 return smu_get_gfx_off_entrycount(smu, value); 117 } 118 119 int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value) 120 { 121 if (!smu->ppt_funcs->get_gfx_off_status) 122 return -EINVAL; 123 124 *value = smu_get_gfx_off_status(smu); 125 126 return 0; 127 } 128 129 int smu_set_soft_freq_range(struct smu_context *smu, 130 enum smu_clk_type clk_type, 131 uint32_t min, 132 uint32_t max) 133 { 134 int ret = 0; 135 136 if (smu->ppt_funcs->set_soft_freq_limited_range) 137 ret = smu->ppt_funcs->set_soft_freq_limited_range(smu, 138 clk_type, 139 min, 140 max); 141 142 return ret; 143 } 144 145 int smu_get_dpm_freq_range(struct smu_context *smu, 146 enum smu_clk_type clk_type, 147 uint32_t *min, 148 uint32_t *max) 149 { 150 int ret = -ENOTSUPP; 151 152 if (!min && !max) 153 return -EINVAL; 154 155 if (smu->ppt_funcs->get_dpm_ultimate_freq) 156 ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu, 157 clk_type, 158 min, 159 max); 160 161 return ret; 162 } 163 164 int smu_set_gfx_power_up_by_imu(struct smu_context *smu) 165 { 166 int ret = 0; 167 struct amdgpu_device *adev = smu->adev; 168 169 if (smu->ppt_funcs->set_gfx_power_up_by_imu) { 170 ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu); 171 if (ret) 172 dev_err(adev->dev, "Failed to enable gfx imu!\n"); 173 } 174 return ret; 175 } 176 177 static u32 smu_get_mclk(void *handle, bool low) 178 { 179 struct smu_context *smu = handle; 180 uint32_t clk_freq; 181 int ret = 0; 182 183 ret = smu_get_dpm_freq_range(smu, SMU_UCLK, 184 low ? &clk_freq : NULL, 185 !low ? 
&clk_freq : NULL); 186 if (ret) 187 return 0; 188 return clk_freq * 100; 189 } 190 191 static u32 smu_get_sclk(void *handle, bool low) 192 { 193 struct smu_context *smu = handle; 194 uint32_t clk_freq; 195 int ret = 0; 196 197 ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, 198 low ? &clk_freq : NULL, 199 !low ? &clk_freq : NULL); 200 if (ret) 201 return 0; 202 return clk_freq * 100; 203 } 204 205 static int smu_set_gfx_imu_enable(struct smu_context *smu) 206 { 207 struct amdgpu_device *adev = smu->adev; 208 209 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 210 return 0; 211 212 if (amdgpu_in_reset(smu->adev) || adev->in_s0ix) 213 return 0; 214 215 return smu_set_gfx_power_up_by_imu(smu); 216 } 217 218 static bool is_vcn_enabled(struct amdgpu_device *adev) 219 { 220 int i; 221 222 for (i = 0; i < adev->num_ip_blocks; i++) { 223 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_VCN || 224 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_JPEG) && 225 !adev->ip_blocks[i].status.valid) 226 return false; 227 } 228 229 return true; 230 } 231 232 static int smu_dpm_set_vcn_enable(struct smu_context *smu, 233 bool enable) 234 { 235 struct smu_power_context *smu_power = &smu->smu_power; 236 struct smu_power_gate *power_gate = &smu_power->power_gate; 237 int ret = 0; 238 239 /* 240 * don't poweron vcn/jpeg when they are skipped. 241 */ 242 if (!is_vcn_enabled(smu->adev)) 243 return 0; 244 245 if (!smu->ppt_funcs->dpm_set_vcn_enable) 246 return 0; 247 248 if (atomic_read(&power_gate->vcn_gated) ^ enable) 249 return 0; 250 251 ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable); 252 if (!ret) 253 atomic_set(&power_gate->vcn_gated, !enable); 254 255 return ret; 256 } 257 258 static int smu_dpm_set_jpeg_enable(struct smu_context *smu, 259 bool enable) 260 { 261 struct smu_power_context *smu_power = &smu->smu_power; 262 struct smu_power_gate *power_gate = &smu_power->power_gate; 263 int ret = 0; 264 265 if (!is_vcn_enabled(smu->adev)) 266 return 0; 267 268 if (!smu->ppt_funcs->dpm_set_jpeg_enable) 269 return 0; 270 271 if (atomic_read(&power_gate->jpeg_gated) ^ enable) 272 return 0; 273 274 ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable); 275 if (!ret) 276 atomic_set(&power_gate->jpeg_gated, !enable); 277 278 return ret; 279 } 280 281 /** 282 * smu_dpm_set_power_gate - power gate/ungate the specific IP block 283 * 284 * @handle: smu_context pointer 285 * @block_type: the IP block to power gate/ungate 286 * @gate: to power gate if true, ungate otherwise 287 * 288 * This API uses no smu->mutex lock protection due to: 289 * 1. It is either called by other IP block(gfx/sdma/vcn/uvd/vce). 290 * This is guarded to be race condition free by the caller. 291 * 2. Or get called on user setting request of power_dpm_force_performance_level. 292 * Under this case, the smu->mutex lock protection is already enforced on 293 * the parent API smu_force_performance_level of the call path. 294 */ 295 static int smu_dpm_set_power_gate(void *handle, 296 uint32_t block_type, 297 bool gate) 298 { 299 struct smu_context *smu = handle; 300 int ret = 0; 301 302 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) { 303 dev_WARN(smu->adev->dev, 304 "SMU uninitialized but power %s requested for %u!\n", 305 gate ? "gate" : "ungate", block_type); 306 return -EOPNOTSUPP; 307 } 308 309 switch (block_type) { 310 /* 311 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses 312 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept. 
313 */ 314 case AMD_IP_BLOCK_TYPE_UVD: 315 case AMD_IP_BLOCK_TYPE_VCN: 316 ret = smu_dpm_set_vcn_enable(smu, !gate); 317 if (ret) 318 dev_err(smu->adev->dev, "Failed to power %s VCN!\n", 319 gate ? "gate" : "ungate"); 320 break; 321 case AMD_IP_BLOCK_TYPE_GFX: 322 ret = smu_gfx_off_control(smu, gate); 323 if (ret) 324 dev_err(smu->adev->dev, "Failed to %s gfxoff!\n", 325 gate ? "enable" : "disable"); 326 break; 327 case AMD_IP_BLOCK_TYPE_SDMA: 328 ret = smu_powergate_sdma(smu, gate); 329 if (ret) 330 dev_err(smu->adev->dev, "Failed to power %s SDMA!\n", 331 gate ? "gate" : "ungate"); 332 break; 333 case AMD_IP_BLOCK_TYPE_JPEG: 334 ret = smu_dpm_set_jpeg_enable(smu, !gate); 335 if (ret) 336 dev_err(smu->adev->dev, "Failed to power %s JPEG!\n", 337 gate ? "gate" : "ungate"); 338 break; 339 default: 340 dev_err(smu->adev->dev, "Unsupported block type!\n"); 341 return -EINVAL; 342 } 343 344 return ret; 345 } 346 347 /** 348 * smu_set_user_clk_dependencies - set user profile clock dependencies 349 * 350 * @smu: smu_context pointer 351 * @clk: enum smu_clk_type type 352 * 353 * Enable/Disable the clock dependency for the @clk type. 354 */ 355 static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk) 356 { 357 if (smu->adev->in_suspend) 358 return; 359 360 if (clk == SMU_MCLK) { 361 smu->user_dpm_profile.clk_dependency = 0; 362 smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK); 363 } else if (clk == SMU_FCLK) { 364 /* MCLK takes precedence over FCLK */ 365 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK))) 366 return; 367 368 smu->user_dpm_profile.clk_dependency = 0; 369 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK); 370 } else if (clk == SMU_SOCCLK) { 371 /* MCLK takes precedence over SOCCLK */ 372 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK))) 373 return; 374 375 smu->user_dpm_profile.clk_dependency = 0; 376 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK); 377 } else 378 /* Add clk dependencies here, if any */ 379 return; 380 } 381 382 /** 383 * smu_restore_dpm_user_profile - reinstate user dpm profile 384 * 385 * @smu: smu_context pointer 386 * 387 * Restore the saved user power configurations include power limit, 388 * clock frequencies, fan control mode and fan speed. 
389 */ 390 static void smu_restore_dpm_user_profile(struct smu_context *smu) 391 { 392 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 393 int ret = 0; 394 395 if (!smu->adev->in_suspend) 396 return; 397 398 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 399 return; 400 401 /* Enable restore flag */ 402 smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE; 403 404 /* set the user dpm power limit */ 405 if (smu->user_dpm_profile.power_limit) { 406 ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit); 407 if (ret) 408 dev_err(smu->adev->dev, "Failed to set power limit value\n"); 409 } 410 411 /* set the user dpm clock configurations */ 412 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 413 enum smu_clk_type clk_type; 414 415 for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) { 416 /* 417 * Iterate over smu clk type and force the saved user clk 418 * configs, skip if clock dependency is enabled 419 */ 420 if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) && 421 smu->user_dpm_profile.clk_mask[clk_type]) { 422 ret = smu_force_smuclk_levels(smu, clk_type, 423 smu->user_dpm_profile.clk_mask[clk_type]); 424 if (ret) 425 dev_err(smu->adev->dev, 426 "Failed to set clock type = %d\n", clk_type); 427 } 428 } 429 } 430 431 /* set the user dpm fan configurations */ 432 if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL || 433 smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) { 434 ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode); 435 if (ret != -EOPNOTSUPP) { 436 smu->user_dpm_profile.fan_speed_pwm = 0; 437 smu->user_dpm_profile.fan_speed_rpm = 0; 438 smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO; 439 dev_err(smu->adev->dev, "Failed to set manual fan control mode\n"); 440 } 441 442 if (smu->user_dpm_profile.fan_speed_pwm) { 443 ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm); 444 if (ret != -EOPNOTSUPP) 445 dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n"); 446 } 447 448 if (smu->user_dpm_profile.fan_speed_rpm) { 449 ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm); 450 if (ret != -EOPNOTSUPP) 451 dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n"); 452 } 453 } 454 455 /* Restore user customized OD settings */ 456 if (smu->user_dpm_profile.user_od) { 457 if (smu->ppt_funcs->restore_user_od_settings) { 458 ret = smu->ppt_funcs->restore_user_od_settings(smu); 459 if (ret) 460 dev_err(smu->adev->dev, "Failed to upload customized OD settings\n"); 461 } 462 } 463 464 /* Disable restore flag */ 465 smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE; 466 } 467 468 static int smu_get_power_num_states(void *handle, 469 struct pp_states_info *state_info) 470 { 471 if (!state_info) 472 return -EINVAL; 473 474 /* not support power state */ 475 memset(state_info, 0, sizeof(struct pp_states_info)); 476 state_info->nums = 1; 477 state_info->states[0] = POWER_STATE_TYPE_DEFAULT; 478 479 return 0; 480 } 481 482 bool is_support_sw_smu(struct amdgpu_device *adev) 483 { 484 /* vega20 is 11.0.2, but it's supported via the powerplay code */ 485 if (adev->asic_type == CHIP_VEGA20) 486 return false; 487 488 if (amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) 489 return true; 490 491 return false; 492 } 493 494 bool is_support_cclk_dpm(struct amdgpu_device *adev) 495 { 496 struct smu_context *smu = adev->powerplay.pp_handle; 497 498 if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT)) 499 return false; 500 501 return true; 502 } 
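
/*
 * Illustrative sketch (editorial addition, not driver code): the restore
 * path above replays manual sysfs settings after suspend/resume. For
 * example, forcing SCLK level 2 while in manual performance mode, e.g.
 *
 *	echo 2 > /sys/class/drm/card0/device/pp_dpm_sclk
 *
 * is saved in user_dpm_profile.clk_mask[SMU_SCLK] by
 * smu_force_smuclk_levels() and is later re-applied on resume roughly as
 *
 *	smu_force_smuclk_levels(smu, SMU_SCLK, BIT(2));
 *
 * unless BIT(SMU_SCLK) happens to be set in user_dpm_profile.clk_dependency.
 */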

static int smu_sys_get_pp_table(void *handle,
				char **table)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}

static int smu_sys_set_pp_table(void *handle,
				const char *buf,
				size_t size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size does not match!\n");
		return -EIO;
	}

	if (!smu_table->hardcode_pptable) {
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
		if (!smu_table->hardcode_pptable)
			return -ENOMEM;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * A special hw_fini action (for Navi1x, the DPM disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

	return ret;
}

static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
	int ret = 0;

	/*
	 * With SCPM enabled, setting the allowed feature mask (via
	 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
	 * That means there is no way to let PMFW know the settings below.
	 * Thus, we just assume all the features are allowed under
	 * such a scenario.
579 */ 580 if (smu->adev->scpm_enabled) { 581 bitmap_fill(feature->allowed, SMU_FEATURE_MAX); 582 return 0; 583 } 584 585 bitmap_zero(feature->allowed, SMU_FEATURE_MAX); 586 587 ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask, 588 SMU_FEATURE_MAX/32); 589 if (ret) 590 return ret; 591 592 bitmap_or(feature->allowed, feature->allowed, 593 (unsigned long *)allowed_feature_mask, 594 feature->feature_num); 595 596 return ret; 597 } 598 599 static int smu_set_funcs(struct amdgpu_device *adev) 600 { 601 struct smu_context *smu = adev->powerplay.pp_handle; 602 603 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) 604 smu->od_enabled = true; 605 606 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 607 case IP_VERSION(11, 0, 0): 608 case IP_VERSION(11, 0, 5): 609 case IP_VERSION(11, 0, 9): 610 navi10_set_ppt_funcs(smu); 611 break; 612 case IP_VERSION(11, 0, 7): 613 case IP_VERSION(11, 0, 11): 614 case IP_VERSION(11, 0, 12): 615 case IP_VERSION(11, 0, 13): 616 sienna_cichlid_set_ppt_funcs(smu); 617 break; 618 case IP_VERSION(12, 0, 0): 619 case IP_VERSION(12, 0, 1): 620 renoir_set_ppt_funcs(smu); 621 break; 622 case IP_VERSION(11, 5, 0): 623 vangogh_set_ppt_funcs(smu); 624 break; 625 case IP_VERSION(13, 0, 1): 626 case IP_VERSION(13, 0, 3): 627 case IP_VERSION(13, 0, 8): 628 yellow_carp_set_ppt_funcs(smu); 629 break; 630 case IP_VERSION(13, 0, 4): 631 case IP_VERSION(13, 0, 11): 632 smu_v13_0_4_set_ppt_funcs(smu); 633 break; 634 case IP_VERSION(13, 0, 5): 635 smu_v13_0_5_set_ppt_funcs(smu); 636 break; 637 case IP_VERSION(11, 0, 8): 638 cyan_skillfish_set_ppt_funcs(smu); 639 break; 640 case IP_VERSION(11, 0, 2): 641 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 642 arcturus_set_ppt_funcs(smu); 643 /* OD is not supported on Arcturus */ 644 smu->od_enabled = false; 645 break; 646 case IP_VERSION(13, 0, 2): 647 aldebaran_set_ppt_funcs(smu); 648 /* Enable pp_od_clk_voltage node */ 649 smu->od_enabled = true; 650 break; 651 case IP_VERSION(13, 0, 0): 652 case IP_VERSION(13, 0, 10): 653 smu_v13_0_0_set_ppt_funcs(smu); 654 break; 655 case IP_VERSION(13, 0, 6): 656 smu_v13_0_6_set_ppt_funcs(smu); 657 /* Enable pp_od_clk_voltage node */ 658 smu->od_enabled = true; 659 break; 660 case IP_VERSION(13, 0, 7): 661 smu_v13_0_7_set_ppt_funcs(smu); 662 break; 663 default: 664 return -EINVAL; 665 } 666 667 return 0; 668 } 669 670 static int smu_early_init(void *handle) 671 { 672 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 673 struct smu_context *smu; 674 int r; 675 676 smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL); 677 if (!smu) 678 return -ENOMEM; 679 680 smu->adev = adev; 681 smu->pm_enabled = !!amdgpu_dpm; 682 smu->is_apu = false; 683 smu->smu_baco.state = SMU_BACO_STATE_EXIT; 684 smu->smu_baco.platform_support = false; 685 smu->user_dpm_profile.fan_mode = -1; 686 687 mutex_init(&smu->message_lock); 688 689 adev->powerplay.pp_handle = smu; 690 adev->powerplay.pp_funcs = &swsmu_pm_funcs; 691 692 r = smu_set_funcs(adev); 693 if (r) 694 return r; 695 return smu_init_microcode(smu); 696 } 697 698 static int smu_set_default_dpm_table(struct smu_context *smu) 699 { 700 struct smu_power_context *smu_power = &smu->smu_power; 701 struct smu_power_gate *power_gate = &smu_power->power_gate; 702 int vcn_gate, jpeg_gate; 703 int ret = 0; 704 705 if (!smu->ppt_funcs->set_default_dpm_table) 706 return 0; 707 708 vcn_gate = atomic_read(&power_gate->vcn_gated); 709 jpeg_gate = atomic_read(&power_gate->jpeg_gated); 710 711 ret = smu_dpm_set_vcn_enable(smu, true); 712 if (ret) 713 return ret; 714 715 ret = 
	smu_dpm_set_jpeg_enable(smu, true);
	if (ret)
		goto err_out;

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
err_out:
	smu_dpm_set_vcn_enable(smu, !vcn_gate);
	return ret;
}

static int smu_apply_default_config_table_settings(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_get_default_config_table_settings(smu,
						    &adev->pm.config_table);
	if (ret)
		return ret;

	return smu_set_config_table(smu, &adev->pm.config_table);
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	smu_set_fine_grain_gfx_freq_parameters(smu);

	if (!smu->pm_enabled)
		return 0;

	ret = smu_post_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to post smu init!\n");
		return ret;
	}

	/*
	 * Explicitly notify PMFW of the power mode the system is in, since
	 * the PMFW may boot the ASIC with a different mode.
	 * For those ASICs supporting the AC/DC switch via gpio, PMFW will
	 * handle the switch automatically and driver involvement
	 * is unnecessary.
	 */
	if (!smu->dc_controlled_by_gpio) {
		ret = smu_set_power_source(smu,
					   adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
					   SMU_POWER_SOURCE_DC);
		if (ret) {
			dev_err(adev->dev, "Failed to switch to %s mode!\n",
				adev->pm.ac_power ? "AC" : "DC");
			return ret;
		}
	}

	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
	    (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
		return 0;

	if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
		ret = smu_set_default_od_settings(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup default OD settings!\n");
			return ret;
		}
	}

	ret = smu_populate_umd_state_clk(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
		return ret;
	}

	ret = smu_get_asic_power_limits(smu,
					&smu->current_power_limit,
					&smu->default_power_limit,
					&smu->max_power_limit);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;
	}

	if (!amdgpu_sriov_vf(adev))
		smu_get_unique_id(smu);

	smu_get_fan_parameters(smu);

	smu_handle_task(smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);

	ret = smu_apply_default_config_table_settings(smu);
	if (ret && (ret != -EOPNOTSUPP)) {
		dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
		return ret;
	}

	smu_restore_dpm_user_profile(smu);

	return 0;
}

static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		/* If one of the tables has VRAM domain restriction, keep it in
		 * VRAM
		 */
		if ((tables[i].domain &
		    (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) ==
			    AMDGPU_GEM_DOMAIN_VRAM)
			driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is reserved for SMC use. Its location is reported to the
 * SMC via the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
 *
 * Returns 0 on success, error on failure.
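 *
 * Editorial note: the pool size comes from adev->pm.smu_prv_buffer_size
 * (normally configured through the amdgpu.smu_memory_pool_size module
 * parameter); SMU_MEMORY_POOL_SIZE_ZERO means no pool is allocated.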
918 */ 919 static int smu_alloc_memory_pool(struct smu_context *smu) 920 { 921 struct amdgpu_device *adev = smu->adev; 922 struct smu_table_context *smu_table = &smu->smu_table; 923 struct smu_table *memory_pool = &smu_table->memory_pool; 924 uint64_t pool_size = smu->pool_size; 925 int ret = 0; 926 927 if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO) 928 return ret; 929 930 memory_pool->size = pool_size; 931 memory_pool->align = PAGE_SIZE; 932 memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT; 933 934 switch (pool_size) { 935 case SMU_MEMORY_POOL_SIZE_256_MB: 936 case SMU_MEMORY_POOL_SIZE_512_MB: 937 case SMU_MEMORY_POOL_SIZE_1_GB: 938 case SMU_MEMORY_POOL_SIZE_2_GB: 939 ret = amdgpu_bo_create_kernel(adev, 940 memory_pool->size, 941 memory_pool->align, 942 memory_pool->domain, 943 &memory_pool->bo, 944 &memory_pool->mc_address, 945 &memory_pool->cpu_addr); 946 if (ret) 947 dev_err(adev->dev, "VRAM allocation for dramlog failed!\n"); 948 break; 949 default: 950 break; 951 } 952 953 return ret; 954 } 955 956 static int smu_free_memory_pool(struct smu_context *smu) 957 { 958 struct smu_table_context *smu_table = &smu->smu_table; 959 struct smu_table *memory_pool = &smu_table->memory_pool; 960 961 if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO) 962 return 0; 963 964 amdgpu_bo_free_kernel(&memory_pool->bo, 965 &memory_pool->mc_address, 966 &memory_pool->cpu_addr); 967 968 memset(memory_pool, 0, sizeof(struct smu_table)); 969 970 return 0; 971 } 972 973 static int smu_alloc_dummy_read_table(struct smu_context *smu) 974 { 975 struct smu_table_context *smu_table = &smu->smu_table; 976 struct smu_table *dummy_read_1_table = 977 &smu_table->dummy_read_1_table; 978 struct amdgpu_device *adev = smu->adev; 979 int ret = 0; 980 981 if (!dummy_read_1_table->size) 982 return 0; 983 984 ret = amdgpu_bo_create_kernel(adev, 985 dummy_read_1_table->size, 986 dummy_read_1_table->align, 987 dummy_read_1_table->domain, 988 &dummy_read_1_table->bo, 989 &dummy_read_1_table->mc_address, 990 &dummy_read_1_table->cpu_addr); 991 if (ret) 992 dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n"); 993 994 return ret; 995 } 996 997 static void smu_free_dummy_read_table(struct smu_context *smu) 998 { 999 struct smu_table_context *smu_table = &smu->smu_table; 1000 struct smu_table *dummy_read_1_table = 1001 &smu_table->dummy_read_1_table; 1002 1003 1004 amdgpu_bo_free_kernel(&dummy_read_1_table->bo, 1005 &dummy_read_1_table->mc_address, 1006 &dummy_read_1_table->cpu_addr); 1007 1008 memset(dummy_read_1_table, 0, sizeof(struct smu_table)); 1009 } 1010 1011 static int smu_smc_table_sw_init(struct smu_context *smu) 1012 { 1013 int ret; 1014 1015 /** 1016 * Create smu_table structure, and init smc tables such as 1017 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, and etc. 1018 */ 1019 ret = smu_init_smc_tables(smu); 1020 if (ret) { 1021 dev_err(smu->adev->dev, "Failed to init smc tables!\n"); 1022 return ret; 1023 } 1024 1025 /** 1026 * Create smu_power_context structure, and allocate smu_dpm_context and 1027 * context size to fill the smu_power_context data. 1028 */ 1029 ret = smu_init_power(smu); 1030 if (ret) { 1031 dev_err(smu->adev->dev, "Failed to init smu_init_power!\n"); 1032 return ret; 1033 } 1034 1035 /* 1036 * allocate vram bos to store smc table contents. 
1037 */ 1038 ret = smu_init_fb_allocations(smu); 1039 if (ret) 1040 return ret; 1041 1042 ret = smu_alloc_memory_pool(smu); 1043 if (ret) 1044 return ret; 1045 1046 ret = smu_alloc_dummy_read_table(smu); 1047 if (ret) 1048 return ret; 1049 1050 ret = smu_i2c_init(smu); 1051 if (ret) 1052 return ret; 1053 1054 return 0; 1055 } 1056 1057 static int smu_smc_table_sw_fini(struct smu_context *smu) 1058 { 1059 int ret; 1060 1061 smu_i2c_fini(smu); 1062 1063 smu_free_dummy_read_table(smu); 1064 1065 ret = smu_free_memory_pool(smu); 1066 if (ret) 1067 return ret; 1068 1069 ret = smu_fini_fb_allocations(smu); 1070 if (ret) 1071 return ret; 1072 1073 ret = smu_fini_power(smu); 1074 if (ret) { 1075 dev_err(smu->adev->dev, "Failed to init smu_fini_power!\n"); 1076 return ret; 1077 } 1078 1079 ret = smu_fini_smc_tables(smu); 1080 if (ret) { 1081 dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n"); 1082 return ret; 1083 } 1084 1085 return 0; 1086 } 1087 1088 static void smu_throttling_logging_work_fn(struct work_struct *work) 1089 { 1090 struct smu_context *smu = container_of(work, struct smu_context, 1091 throttling_logging_work); 1092 1093 smu_log_thermal_throttling(smu); 1094 } 1095 1096 static void smu_interrupt_work_fn(struct work_struct *work) 1097 { 1098 struct smu_context *smu = container_of(work, struct smu_context, 1099 interrupt_work); 1100 1101 if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work) 1102 smu->ppt_funcs->interrupt_work(smu); 1103 } 1104 1105 static void smu_swctf_delayed_work_handler(struct work_struct *work) 1106 { 1107 struct smu_context *smu = 1108 container_of(work, struct smu_context, swctf_delayed_work.work); 1109 struct smu_temperature_range *range = 1110 &smu->thermal_range; 1111 struct amdgpu_device *adev = smu->adev; 1112 uint32_t hotspot_tmp, size; 1113 1114 /* 1115 * If the hotspot temperature is confirmed as below SW CTF setting point 1116 * after the delay enforced, nothing will be done. 1117 * Otherwise, a graceful shutdown will be performed to prevent further damage. 
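	 * (Editorial note: "SW CTF" here refers to the software critical
	 * thermal fault limit enforced by the driver, as opposed to the
	 * hardware CTF shutdown handled by the firmware.)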
1118 */ 1119 if (range->software_shutdown_temp && 1120 smu->ppt_funcs->read_sensor && 1121 !smu->ppt_funcs->read_sensor(smu, 1122 AMDGPU_PP_SENSOR_HOTSPOT_TEMP, 1123 &hotspot_tmp, 1124 &size) && 1125 hotspot_tmp / 1000 < range->software_shutdown_temp) 1126 return; 1127 1128 dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n"); 1129 dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n"); 1130 orderly_poweroff(true); 1131 } 1132 1133 static void smu_init_xgmi_plpd_mode(struct smu_context *smu) 1134 { 1135 if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) { 1136 smu->plpd_mode = XGMI_PLPD_DEFAULT; 1137 return; 1138 } 1139 1140 /* PMFW put PLPD into default policy after enabling the feature */ 1141 if (smu_feature_is_enabled(smu, 1142 SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT)) 1143 smu->plpd_mode = XGMI_PLPD_DEFAULT; 1144 else 1145 smu->plpd_mode = XGMI_PLPD_NONE; 1146 } 1147 1148 static int smu_sw_init(void *handle) 1149 { 1150 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1151 struct smu_context *smu = adev->powerplay.pp_handle; 1152 int ret; 1153 1154 smu->pool_size = adev->pm.smu_prv_buffer_size; 1155 smu->smu_feature.feature_num = SMU_FEATURE_MAX; 1156 bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX); 1157 bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX); 1158 1159 INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn); 1160 INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn); 1161 atomic64_set(&smu->throttle_int_counter, 0); 1162 smu->watermarks_bitmap = 0; 1163 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 1164 smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 1165 1166 atomic_set(&smu->smu_power.power_gate.vcn_gated, 1); 1167 atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); 1168 1169 smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; 1170 smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; 1171 smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; 1172 smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; 1173 smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3; 1174 smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; 1175 smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; 1176 smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; 1177 1178 smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 1179 smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; 1180 smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; 1181 smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO; 1182 smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR; 1183 smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE; 1184 smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM; 1185 smu->display_config = &adev->pm.pm_display_cfg; 1186 1187 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; 1188 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; 1189 1190 INIT_DELAYED_WORK(&smu->swctf_delayed_work, 1191 smu_swctf_delayed_work_handler); 1192 1193 ret = smu_smc_table_sw_init(smu); 1194 if (ret) { 1195 dev_err(adev->dev, "Failed to sw init smc table!\n"); 1196 return ret; 1197 } 1198 1199 /* get boot_values from vbios to set revision, gfxclk, and etc. 
*/ 1200 ret = smu_get_vbios_bootup_values(smu); 1201 if (ret) { 1202 dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n"); 1203 return ret; 1204 } 1205 1206 ret = smu_init_pptable_microcode(smu); 1207 if (ret) { 1208 dev_err(adev->dev, "Failed to setup pptable firmware!\n"); 1209 return ret; 1210 } 1211 1212 ret = smu_register_irq_handler(smu); 1213 if (ret) { 1214 dev_err(adev->dev, "Failed to register smc irq handler!\n"); 1215 return ret; 1216 } 1217 1218 /* If there is no way to query fan control mode, fan control is not supported */ 1219 if (!smu->ppt_funcs->get_fan_control_mode) 1220 smu->adev->pm.no_fan = true; 1221 1222 return 0; 1223 } 1224 1225 static int smu_sw_fini(void *handle) 1226 { 1227 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1228 struct smu_context *smu = adev->powerplay.pp_handle; 1229 int ret; 1230 1231 ret = smu_smc_table_sw_fini(smu); 1232 if (ret) { 1233 dev_err(adev->dev, "Failed to sw fini smc table!\n"); 1234 return ret; 1235 } 1236 1237 smu_fini_microcode(smu); 1238 1239 return 0; 1240 } 1241 1242 static int smu_get_thermal_temperature_range(struct smu_context *smu) 1243 { 1244 struct amdgpu_device *adev = smu->adev; 1245 struct smu_temperature_range *range = 1246 &smu->thermal_range; 1247 int ret = 0; 1248 1249 if (!smu->ppt_funcs->get_thermal_temperature_range) 1250 return 0; 1251 1252 ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range); 1253 if (ret) 1254 return ret; 1255 1256 adev->pm.dpm.thermal.min_temp = range->min; 1257 adev->pm.dpm.thermal.max_temp = range->max; 1258 adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max; 1259 adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min; 1260 adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max; 1261 adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max; 1262 adev->pm.dpm.thermal.min_mem_temp = range->mem_min; 1263 adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max; 1264 adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max; 1265 1266 return ret; 1267 } 1268 1269 static int smu_smc_hw_setup(struct smu_context *smu) 1270 { 1271 struct smu_feature *feature = &smu->smu_feature; 1272 struct amdgpu_device *adev = smu->adev; 1273 uint8_t pcie_gen = 0, pcie_width = 0; 1274 uint64_t features_supported; 1275 int ret = 0; 1276 1277 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 1278 case IP_VERSION(11, 0, 7): 1279 case IP_VERSION(11, 0, 11): 1280 case IP_VERSION(11, 5, 0): 1281 case IP_VERSION(11, 0, 12): 1282 if (adev->in_suspend && smu_is_dpm_running(smu)) { 1283 dev_info(adev->dev, "dpm has been enabled\n"); 1284 ret = smu_system_features_control(smu, true); 1285 if (ret) 1286 dev_err(adev->dev, "Failed system features control!\n"); 1287 return ret; 1288 } 1289 break; 1290 default: 1291 break; 1292 } 1293 1294 ret = smu_init_display_count(smu, 0); 1295 if (ret) { 1296 dev_info(adev->dev, "Failed to pre-set display count as 0!\n"); 1297 return ret; 1298 } 1299 1300 ret = smu_set_driver_table_location(smu); 1301 if (ret) { 1302 dev_err(adev->dev, "Failed to SetDriverDramAddr!\n"); 1303 return ret; 1304 } 1305 1306 /* 1307 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools. 1308 */ 1309 ret = smu_set_tool_table_location(smu); 1310 if (ret) { 1311 dev_err(adev->dev, "Failed to SetToolsDramAddr!\n"); 1312 return ret; 1313 } 1314 1315 /* 1316 * Use msg SetSystemVirtualDramAddr and DramLogSetDramAddr can notify 1317 * pool location. 
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	/*
	 * It is assumed the pptable used before runpm is the same as
	 * the one used afterwards. Thus, we can reuse the stored
	 * copy and do not need to set up the pptable again.
	 */
	if (!adev->in_runpm) {
		ret = smu_setup_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup pptable!\n");
			return ret;
		}
	}

	/* smu_dump_pptable(smu); */

	/*
	 * With SCPM enabled, PSP is responsible for transferring the PPTable
	 * to SMU. Driver involvement is neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		/*
		 * Copy the pptable bo in vram to the SMC with SMU MSGs such as
		 * SetDriverDramAddr and TransferTableDram2Smu.
		 */
		ret = smu_write_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
			return ret;
		}
	}

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	/*
	 * With SCPM enabled, these actions (and the relevant messages) are
	 * neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		ret = smu_feature_set_allowed_mask(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
			return ret;
		}
	}

	ret = smu_system_features_control(smu, true);
	if (ret) {
		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
		return ret;
	}

	smu_init_xgmi_plpd_mode(smu);

	ret = smu_feature_get_enabled_mask(smu, &features_supported);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
		return ret;
	}
	bitmap_copy(feature->supported,
		    (unsigned long *)&features_supported,
		    feature->feature_num);

	if (!smu_is_dpm_running(smu))
		dev_info(adev->dev, "dpm has been disabled\n");

	/*
	 * Set initial values (obtained from vbios) in the dpm tables context
	 * for gfxclk, memclk, dcefclk, etc., and enable the DPM feature for
	 * each clock type.
	 */
	ret = smu_set_default_dpm_table(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
		return ret;
	}

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level.
0 is DPM0, and 1 is DPM1 1414 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4 1415 * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 1416 */ 1417 if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) 1418 pcie_width = 6; 1419 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) 1420 pcie_width = 5; 1421 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) 1422 pcie_width = 4; 1423 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) 1424 pcie_width = 3; 1425 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) 1426 pcie_width = 2; 1427 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) 1428 pcie_width = 1; 1429 ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width); 1430 if (ret) { 1431 dev_err(adev->dev, "Attempt to override pcie params failed!\n"); 1432 return ret; 1433 } 1434 1435 ret = smu_get_thermal_temperature_range(smu); 1436 if (ret) { 1437 dev_err(adev->dev, "Failed to get thermal temperature ranges!\n"); 1438 return ret; 1439 } 1440 1441 ret = smu_enable_thermal_alert(smu); 1442 if (ret) { 1443 dev_err(adev->dev, "Failed to enable thermal alert!\n"); 1444 return ret; 1445 } 1446 1447 ret = smu_notify_display_change(smu); 1448 if (ret) { 1449 dev_err(adev->dev, "Failed to notify display change!\n"); 1450 return ret; 1451 } 1452 1453 /* 1454 * Set min deep sleep dce fclk with bootup value from vbios via 1455 * SetMinDeepSleepDcefclk MSG. 1456 */ 1457 ret = smu_set_min_dcef_deep_sleep(smu, 1458 smu->smu_table.boot_values.dcefclk / 100); 1459 1460 return ret; 1461 } 1462 1463 static int smu_start_smc_engine(struct smu_context *smu) 1464 { 1465 struct amdgpu_device *adev = smu->adev; 1466 int ret = 0; 1467 1468 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 1469 if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) { 1470 if (smu->ppt_funcs->load_microcode) { 1471 ret = smu->ppt_funcs->load_microcode(smu); 1472 if (ret) 1473 return ret; 1474 } 1475 } 1476 } 1477 1478 if (smu->ppt_funcs->check_fw_status) { 1479 ret = smu->ppt_funcs->check_fw_status(smu); 1480 if (ret) { 1481 dev_err(adev->dev, "SMC is not ready\n"); 1482 return ret; 1483 } 1484 } 1485 1486 /* 1487 * Send msg GetDriverIfVersion to check if the return value is equal 1488 * with DRIVER_IF_VERSION of smc header. 1489 */ 1490 ret = smu_check_fw_version(smu); 1491 if (ret) 1492 return ret; 1493 1494 return ret; 1495 } 1496 1497 static int smu_hw_init(void *handle) 1498 { 1499 int ret; 1500 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1501 struct smu_context *smu = adev->powerplay.pp_handle; 1502 1503 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) { 1504 smu->pm_enabled = false; 1505 return 0; 1506 } 1507 1508 ret = smu_start_smc_engine(smu); 1509 if (ret) { 1510 dev_err(adev->dev, "SMC engine is not correctly up!\n"); 1511 return ret; 1512 } 1513 1514 if (smu->is_apu) { 1515 ret = smu_set_gfx_imu_enable(smu); 1516 if (ret) 1517 return ret; 1518 smu_dpm_set_vcn_enable(smu, true); 1519 smu_dpm_set_jpeg_enable(smu, true); 1520 smu_set_gfx_cgpg(smu, true); 1521 } 1522 1523 if (!smu->pm_enabled) 1524 return 0; 1525 1526 ret = smu_get_driver_allowed_feature_mask(smu); 1527 if (ret) 1528 return ret; 1529 1530 ret = smu_smc_hw_setup(smu); 1531 if (ret) { 1532 dev_err(adev->dev, "Failed to setup smc hw!\n"); 1533 return ret; 1534 } 1535 1536 /* 1537 * Move maximum sustainable clock retrieving here considering 1538 * 1. It is not needed on resume(from S3). 1539 * 2. 
DAL settings come between .hw_init and .late_init of SMU. 1540 * And DAL needs to know the maximum sustainable clocks. Thus 1541 * it cannot be put in .late_init(). 1542 */ 1543 ret = smu_init_max_sustainable_clocks(smu); 1544 if (ret) { 1545 dev_err(adev->dev, "Failed to init max sustainable clocks!\n"); 1546 return ret; 1547 } 1548 1549 adev->pm.dpm_enabled = true; 1550 1551 dev_info(adev->dev, "SMU is initialized successfully!\n"); 1552 1553 return 0; 1554 } 1555 1556 static int smu_disable_dpms(struct smu_context *smu) 1557 { 1558 struct amdgpu_device *adev = smu->adev; 1559 int ret = 0; 1560 bool use_baco = !smu->is_apu && 1561 ((amdgpu_in_reset(adev) && 1562 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) || 1563 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev))); 1564 1565 /* 1566 * For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features(disablement or others) 1567 * properly on suspend/reset/unload. Driver involvement may cause some unexpected issues. 1568 */ 1569 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 1570 case IP_VERSION(13, 0, 0): 1571 case IP_VERSION(13, 0, 7): 1572 case IP_VERSION(13, 0, 10): 1573 return 0; 1574 default: 1575 break; 1576 } 1577 1578 /* 1579 * For custom pptable uploading, skip the DPM features 1580 * disable process on Navi1x ASICs. 1581 * - As the gfx related features are under control of 1582 * RLC on those ASICs. RLC reinitialization will be 1583 * needed to reenable them. That will cost much more 1584 * efforts. 1585 * 1586 * - SMU firmware can handle the DPM reenablement 1587 * properly. 1588 */ 1589 if (smu->uploading_custom_pp_table) { 1590 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 1591 case IP_VERSION(11, 0, 0): 1592 case IP_VERSION(11, 0, 5): 1593 case IP_VERSION(11, 0, 9): 1594 case IP_VERSION(11, 0, 7): 1595 case IP_VERSION(11, 0, 11): 1596 case IP_VERSION(11, 5, 0): 1597 case IP_VERSION(11, 0, 12): 1598 case IP_VERSION(11, 0, 13): 1599 return 0; 1600 default: 1601 break; 1602 } 1603 } 1604 1605 /* 1606 * For Sienna_Cichlid, PMFW will handle the features disablement properly 1607 * on BACO in. Driver involvement is unnecessary. 1608 */ 1609 if (use_baco) { 1610 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 1611 case IP_VERSION(11, 0, 7): 1612 case IP_VERSION(11, 0, 0): 1613 case IP_VERSION(11, 0, 5): 1614 case IP_VERSION(11, 0, 9): 1615 case IP_VERSION(13, 0, 7): 1616 return 0; 1617 default: 1618 break; 1619 } 1620 } 1621 1622 /* 1623 * For SMU 13.0.4/11, PMFW will handle the features disablement properly 1624 * for gpu reset and S0i3 cases. Driver involvement is unnecessary. 1625 */ 1626 if (amdgpu_in_reset(adev) || adev->in_s0ix) { 1627 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 1628 case IP_VERSION(13, 0, 4): 1629 case IP_VERSION(13, 0, 11): 1630 return 0; 1631 default: 1632 break; 1633 } 1634 } 1635 1636 /* 1637 * For gpu reset, runpm and hibernation through BACO, 1638 * BACO feature has to be kept enabled. 
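	 * (Editorial note: BACO = "Bus Active, Chip Off", the low-power state
	 * in which the GPU core is powered down while its PCIe link stays
	 * present.)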
1639 */ 1640 if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) { 1641 ret = smu_disable_all_features_with_exception(smu, 1642 SMU_FEATURE_BACO_BIT); 1643 if (ret) 1644 dev_err(adev->dev, "Failed to disable smu features except BACO.\n"); 1645 } else { 1646 /* DisableAllSmuFeatures message is not permitted with SCPM enabled */ 1647 if (!adev->scpm_enabled) { 1648 ret = smu_system_features_control(smu, false); 1649 if (ret) 1650 dev_err(adev->dev, "Failed to disable smu features.\n"); 1651 } 1652 } 1653 1654 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) && 1655 !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop) 1656 adev->gfx.rlc.funcs->stop(adev); 1657 1658 return ret; 1659 } 1660 1661 static int smu_smc_hw_cleanup(struct smu_context *smu) 1662 { 1663 struct amdgpu_device *adev = smu->adev; 1664 int ret = 0; 1665 1666 cancel_work_sync(&smu->throttling_logging_work); 1667 cancel_work_sync(&smu->interrupt_work); 1668 1669 ret = smu_disable_thermal_alert(smu); 1670 if (ret) { 1671 dev_err(adev->dev, "Fail to disable thermal alert!\n"); 1672 return ret; 1673 } 1674 1675 cancel_delayed_work_sync(&smu->swctf_delayed_work); 1676 1677 ret = smu_disable_dpms(smu); 1678 if (ret) { 1679 dev_err(adev->dev, "Fail to disable dpm features!\n"); 1680 return ret; 1681 } 1682 1683 return 0; 1684 } 1685 1686 static int smu_hw_fini(void *handle) 1687 { 1688 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1689 struct smu_context *smu = adev->powerplay.pp_handle; 1690 1691 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1692 return 0; 1693 1694 smu_dpm_set_vcn_enable(smu, false); 1695 smu_dpm_set_jpeg_enable(smu, false); 1696 1697 adev->vcn.cur_state = AMD_PG_STATE_GATE; 1698 adev->jpeg.cur_state = AMD_PG_STATE_GATE; 1699 1700 if (!smu->pm_enabled) 1701 return 0; 1702 1703 adev->pm.dpm_enabled = false; 1704 1705 return smu_smc_hw_cleanup(smu); 1706 } 1707 1708 static void smu_late_fini(void *handle) 1709 { 1710 struct amdgpu_device *adev = handle; 1711 struct smu_context *smu = adev->powerplay.pp_handle; 1712 1713 kfree(smu); 1714 } 1715 1716 static int smu_reset(struct smu_context *smu) 1717 { 1718 struct amdgpu_device *adev = smu->adev; 1719 int ret; 1720 1721 ret = smu_hw_fini(adev); 1722 if (ret) 1723 return ret; 1724 1725 ret = smu_hw_init(adev); 1726 if (ret) 1727 return ret; 1728 1729 ret = smu_late_init(adev); 1730 if (ret) 1731 return ret; 1732 1733 return 0; 1734 } 1735 1736 static int smu_suspend(void *handle) 1737 { 1738 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1739 struct smu_context *smu = adev->powerplay.pp_handle; 1740 int ret; 1741 uint64_t count; 1742 1743 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1744 return 0; 1745 1746 if (!smu->pm_enabled) 1747 return 0; 1748 1749 adev->pm.dpm_enabled = false; 1750 1751 ret = smu_smc_hw_cleanup(smu); 1752 if (ret) 1753 return ret; 1754 1755 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED); 1756 1757 smu_set_gfx_cgpg(smu, false); 1758 1759 /* 1760 * pwfw resets entrycount when device is suspended, so we save the 1761 * last value to be used when we resume to keep it consistent 1762 */ 1763 ret = smu_get_entrycount_gfxoff(smu, &count); 1764 if (!ret) 1765 adev->gfx.gfx_off_entrycount = count; 1766 1767 return 0; 1768 } 1769 1770 static int smu_resume(void *handle) 1771 { 1772 int ret; 1773 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1774 struct smu_context *smu = adev->powerplay.pp_handle; 1775 1776 if (amdgpu_sriov_vf(adev)&& 
!amdgpu_sriov_is_pp_one_vf(adev)) 1777 return 0; 1778 1779 if (!smu->pm_enabled) 1780 return 0; 1781 1782 dev_info(adev->dev, "SMU is resuming...\n"); 1783 1784 ret = smu_start_smc_engine(smu); 1785 if (ret) { 1786 dev_err(adev->dev, "SMC engine is not correctly up!\n"); 1787 return ret; 1788 } 1789 1790 ret = smu_smc_hw_setup(smu); 1791 if (ret) { 1792 dev_err(adev->dev, "Failed to setup smc hw!\n"); 1793 return ret; 1794 } 1795 1796 ret = smu_set_gfx_imu_enable(smu); 1797 if (ret) 1798 return ret; 1799 1800 smu_set_gfx_cgpg(smu, true); 1801 1802 smu->disable_uclk_switch = 0; 1803 1804 adev->pm.dpm_enabled = true; 1805 1806 dev_info(adev->dev, "SMU is resumed successfully!\n"); 1807 1808 return 0; 1809 } 1810 1811 static int smu_display_configuration_change(void *handle, 1812 const struct amd_pp_display_configuration *display_config) 1813 { 1814 struct smu_context *smu = handle; 1815 1816 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1817 return -EOPNOTSUPP; 1818 1819 if (!display_config) 1820 return -EINVAL; 1821 1822 smu_set_min_dcef_deep_sleep(smu, 1823 display_config->min_dcef_deep_sleep_set_clk / 100); 1824 1825 return 0; 1826 } 1827 1828 static int smu_set_clockgating_state(void *handle, 1829 enum amd_clockgating_state state) 1830 { 1831 return 0; 1832 } 1833 1834 static int smu_set_powergating_state(void *handle, 1835 enum amd_powergating_state state) 1836 { 1837 return 0; 1838 } 1839 1840 static int smu_enable_umd_pstate(void *handle, 1841 enum amd_dpm_forced_level *level) 1842 { 1843 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 1844 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 1845 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 1846 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 1847 1848 struct smu_context *smu = (struct smu_context*)(handle); 1849 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1850 1851 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 1852 return -EINVAL; 1853 1854 if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) { 1855 /* enter umd pstate, save current level, disable gfx cg*/ 1856 if (*level & profile_mode_mask) { 1857 smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level; 1858 smu_gpo_control(smu, false); 1859 smu_gfx_ulv_control(smu, false); 1860 smu_deep_sleep_control(smu, false); 1861 amdgpu_asic_update_umd_stable_pstate(smu->adev, true); 1862 } 1863 } else { 1864 /* exit umd pstate, restore level, enable gfx cg*/ 1865 if (!(*level & profile_mode_mask)) { 1866 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) 1867 *level = smu_dpm_ctx->saved_dpm_level; 1868 amdgpu_asic_update_umd_stable_pstate(smu->adev, false); 1869 smu_deep_sleep_control(smu, true); 1870 smu_gfx_ulv_control(smu, true); 1871 smu_gpo_control(smu, true); 1872 } 1873 } 1874 1875 return 0; 1876 } 1877 1878 static int smu_bump_power_profile_mode(struct smu_context *smu, 1879 long *param, 1880 uint32_t param_size) 1881 { 1882 int ret = 0; 1883 1884 if (smu->ppt_funcs->set_power_profile_mode) 1885 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size); 1886 1887 return ret; 1888 } 1889 1890 static int smu_adjust_power_state_dynamic(struct smu_context *smu, 1891 enum amd_dpm_forced_level level, 1892 bool skip_display_settings) 1893 { 1894 int ret = 0; 1895 int index = 0; 1896 long workload; 1897 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1898 1899 if (!skip_display_settings) { 1900 ret = smu_display_config_changed(smu); 1901 if (ret) { 1902 dev_err(smu->adev->dev, "Failed to change display config!"); 1903 return ret; 1904 } 1905 } 1906 1907 ret = 
smu_apply_clocks_adjust_rules(smu); 1908 if (ret) { 1909 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!"); 1910 return ret; 1911 } 1912 1913 if (!skip_display_settings) { 1914 ret = smu_notify_smc_display_config(smu); 1915 if (ret) { 1916 dev_err(smu->adev->dev, "Failed to notify smc display config!"); 1917 return ret; 1918 } 1919 } 1920 1921 if (smu_dpm_ctx->dpm_level != level) { 1922 ret = smu_asic_set_performance_level(smu, level); 1923 if (ret) { 1924 dev_err(smu->adev->dev, "Failed to set performance level!"); 1925 return ret; 1926 } 1927 1928 /* update the saved copy */ 1929 smu_dpm_ctx->dpm_level = level; 1930 } 1931 1932 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && 1933 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { 1934 index = fls(smu->workload_mask); 1935 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; 1936 workload = smu->workload_setting[index]; 1937 1938 if (smu->power_profile_mode != workload) 1939 smu_bump_power_profile_mode(smu, &workload, 0); 1940 } 1941 1942 return ret; 1943 } 1944 1945 static int smu_handle_task(struct smu_context *smu, 1946 enum amd_dpm_forced_level level, 1947 enum amd_pp_task task_id) 1948 { 1949 int ret = 0; 1950 1951 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1952 return -EOPNOTSUPP; 1953 1954 switch (task_id) { 1955 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: 1956 ret = smu_pre_display_config_changed(smu); 1957 if (ret) 1958 return ret; 1959 ret = smu_adjust_power_state_dynamic(smu, level, false); 1960 break; 1961 case AMD_PP_TASK_COMPLETE_INIT: 1962 case AMD_PP_TASK_READJUST_POWER_STATE: 1963 ret = smu_adjust_power_state_dynamic(smu, level, true); 1964 break; 1965 default: 1966 break; 1967 } 1968 1969 return ret; 1970 } 1971 1972 static int smu_handle_dpm_task(void *handle, 1973 enum amd_pp_task task_id, 1974 enum amd_pm_state_type *user_state) 1975 { 1976 struct smu_context *smu = handle; 1977 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 1978 1979 return smu_handle_task(smu, smu_dpm->dpm_level, task_id); 1980 1981 } 1982 1983 static int smu_switch_power_profile(void *handle, 1984 enum PP_SMC_POWER_PROFILE type, 1985 bool en) 1986 { 1987 struct smu_context *smu = handle; 1988 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1989 long workload; 1990 uint32_t index; 1991 1992 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1993 return -EOPNOTSUPP; 1994 1995 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM)) 1996 return -EINVAL; 1997 1998 if (!en) { 1999 smu->workload_mask &= ~(1 << smu->workload_prority[type]); 2000 index = fls(smu->workload_mask); 2001 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; 2002 workload = smu->workload_setting[index]; 2003 } else { 2004 smu->workload_mask |= (1 << smu->workload_prority[type]); 2005 index = fls(smu->workload_mask); 2006 index = index <= WORKLOAD_POLICY_MAX ? 
index - 1 : 0; 2007 workload = smu->workload_setting[index]; 2008 } 2009 2010 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && 2011 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) 2012 smu_bump_power_profile_mode(smu, &workload, 0); 2013 2014 return 0; 2015 } 2016 2017 static enum amd_dpm_forced_level smu_get_performance_level(void *handle) 2018 { 2019 struct smu_context *smu = handle; 2020 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 2021 2022 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2023 return -EOPNOTSUPP; 2024 2025 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 2026 return -EINVAL; 2027 2028 return smu_dpm_ctx->dpm_level; 2029 } 2030 2031 static int smu_force_performance_level(void *handle, 2032 enum amd_dpm_forced_level level) 2033 { 2034 struct smu_context *smu = handle; 2035 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 2036 int ret = 0; 2037 2038 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2039 return -EOPNOTSUPP; 2040 2041 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 2042 return -EINVAL; 2043 2044 ret = smu_enable_umd_pstate(smu, &level); 2045 if (ret) 2046 return ret; 2047 2048 ret = smu_handle_task(smu, level, 2049 AMD_PP_TASK_READJUST_POWER_STATE); 2050 2051 /* reset user dpm clock state */ 2052 if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { 2053 memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask)); 2054 smu->user_dpm_profile.clk_dependency = 0; 2055 } 2056 2057 return ret; 2058 } 2059 2060 static int smu_set_display_count(void *handle, uint32_t count) 2061 { 2062 struct smu_context *smu = handle; 2063 2064 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2065 return -EOPNOTSUPP; 2066 2067 return smu_init_display_count(smu, count); 2068 } 2069 2070 static int smu_force_smuclk_levels(struct smu_context *smu, 2071 enum smu_clk_type clk_type, 2072 uint32_t mask) 2073 { 2074 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 2075 int ret = 0; 2076 2077 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2078 return -EOPNOTSUPP; 2079 2080 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { 2081 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n"); 2082 return -EINVAL; 2083 } 2084 2085 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) { 2086 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask); 2087 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 2088 smu->user_dpm_profile.clk_mask[clk_type] = mask; 2089 smu_set_user_clk_dependencies(smu, clk_type); 2090 } 2091 } 2092 2093 return ret; 2094 } 2095 2096 static int smu_force_ppclk_levels(void *handle, 2097 enum pp_clock_type type, 2098 uint32_t mask) 2099 { 2100 struct smu_context *smu = handle; 2101 enum smu_clk_type clk_type; 2102 2103 switch (type) { 2104 case PP_SCLK: 2105 clk_type = SMU_SCLK; break; 2106 case PP_MCLK: 2107 clk_type = SMU_MCLK; break; 2108 case PP_PCIE: 2109 clk_type = SMU_PCIE; break; 2110 case PP_SOCCLK: 2111 clk_type = SMU_SOCCLK; break; 2112 case PP_FCLK: 2113 clk_type = SMU_FCLK; break; 2114 case PP_DCEFCLK: 2115 clk_type = SMU_DCEFCLK; break; 2116 case PP_VCLK: 2117 clk_type = SMU_VCLK; break; 2118 case PP_VCLK1: 2119 clk_type = SMU_VCLK1; break; 2120 case PP_DCLK: 2121 clk_type = SMU_DCLK; break; 2122 case PP_DCLK1: 2123 clk_type = SMU_DCLK1; break; 2124 case OD_SCLK: 2125 clk_type = SMU_OD_SCLK; break; 2126 case OD_MCLK: 2127 clk_type = SMU_OD_MCLK; break; 2128 case OD_VDDC_CURVE: 2129 clk_type = 
SMU_OD_VDDC_CURVE; break; 2130 case OD_RANGE: 2131 clk_type = SMU_OD_RANGE; break; 2132 default: 2133 return -EINVAL; 2134 } 2135 2136 return smu_force_smuclk_levels(smu, clk_type, mask); 2137 } 2138 2139 /* 2140 * On system suspend or reset, the dpm_enabled flag 2141 * will be cleared, so the SMU services that are not 2142 * supported will be gated. 2143 * However, setting the mp1 state should still be allowed 2144 * even when dpm_enabled is cleared. 2145 */ 2146 static int smu_set_mp1_state(void *handle, 2147 enum pp_mp1_state mp1_state) 2148 { 2149 struct smu_context *smu = handle; 2150 int ret = 0; 2151 2152 if (!smu->pm_enabled) 2153 return -EOPNOTSUPP; 2154 2155 if (smu->ppt_funcs && 2156 smu->ppt_funcs->set_mp1_state) 2157 ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state); 2158 2159 return ret; 2160 } 2161 2162 static int smu_set_df_cstate(void *handle, 2163 enum pp_df_cstate state) 2164 { 2165 struct smu_context *smu = handle; 2166 int ret = 0; 2167 2168 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2169 return -EOPNOTSUPP; 2170 2171 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate) 2172 return 0; 2173 2174 ret = smu->ppt_funcs->set_df_cstate(smu, state); 2175 if (ret) 2176 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n"); 2177 2178 return ret; 2179 } 2180 2181 int smu_write_watermarks_table(struct smu_context *smu) 2182 { 2183 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2184 return -EOPNOTSUPP; 2185 2186 return smu_set_watermarks_table(smu, NULL); 2187 } 2188 2189 static int smu_set_watermarks_for_clock_ranges(void *handle, 2190 struct pp_smu_wm_range_sets *clock_ranges) 2191 { 2192 struct smu_context *smu = handle; 2193 2194 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2195 return -EOPNOTSUPP; 2196 2197 if (smu->disable_watermark) 2198 return 0; 2199 2200 return smu_set_watermarks_table(smu, clock_ranges); 2201 } 2202 2203 int smu_set_ac_dc(struct smu_context *smu) 2204 { 2205 int ret = 0; 2206 2207 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2208 return -EOPNOTSUPP; 2209 2210 /* controlled by firmware */ 2211 if (smu->dc_controlled_by_gpio) 2212 return 0; 2213 2214 ret = smu_set_power_source(smu, 2215 smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC : 2216 SMU_POWER_SOURCE_DC); 2217 if (ret) 2218 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n", 2219 smu->adev->pm.ac_power ?
"AC" : "DC"); 2220 2221 return ret; 2222 } 2223 2224 const struct amd_ip_funcs smu_ip_funcs = { 2225 .name = "smu", 2226 .early_init = smu_early_init, 2227 .late_init = smu_late_init, 2228 .sw_init = smu_sw_init, 2229 .sw_fini = smu_sw_fini, 2230 .hw_init = smu_hw_init, 2231 .hw_fini = smu_hw_fini, 2232 .late_fini = smu_late_fini, 2233 .suspend = smu_suspend, 2234 .resume = smu_resume, 2235 .is_idle = NULL, 2236 .check_soft_reset = NULL, 2237 .wait_for_idle = NULL, 2238 .soft_reset = NULL, 2239 .set_clockgating_state = smu_set_clockgating_state, 2240 .set_powergating_state = smu_set_powergating_state, 2241 }; 2242 2243 const struct amdgpu_ip_block_version smu_v11_0_ip_block = { 2244 .type = AMD_IP_BLOCK_TYPE_SMC, 2245 .major = 11, 2246 .minor = 0, 2247 .rev = 0, 2248 .funcs = &smu_ip_funcs, 2249 }; 2250 2251 const struct amdgpu_ip_block_version smu_v12_0_ip_block = { 2252 .type = AMD_IP_BLOCK_TYPE_SMC, 2253 .major = 12, 2254 .minor = 0, 2255 .rev = 0, 2256 .funcs = &smu_ip_funcs, 2257 }; 2258 2259 const struct amdgpu_ip_block_version smu_v13_0_ip_block = { 2260 .type = AMD_IP_BLOCK_TYPE_SMC, 2261 .major = 13, 2262 .minor = 0, 2263 .rev = 0, 2264 .funcs = &smu_ip_funcs, 2265 }; 2266 2267 static int smu_load_microcode(void *handle) 2268 { 2269 struct smu_context *smu = handle; 2270 struct amdgpu_device *adev = smu->adev; 2271 int ret = 0; 2272 2273 if (!smu->pm_enabled) 2274 return -EOPNOTSUPP; 2275 2276 /* This should be used for non PSP loading */ 2277 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) 2278 return 0; 2279 2280 if (smu->ppt_funcs->load_microcode) { 2281 ret = smu->ppt_funcs->load_microcode(smu); 2282 if (ret) { 2283 dev_err(adev->dev, "Load microcode failed\n"); 2284 return ret; 2285 } 2286 } 2287 2288 if (smu->ppt_funcs->check_fw_status) { 2289 ret = smu->ppt_funcs->check_fw_status(smu); 2290 if (ret) { 2291 dev_err(adev->dev, "SMC is not ready\n"); 2292 return ret; 2293 } 2294 } 2295 2296 return ret; 2297 } 2298 2299 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) 2300 { 2301 int ret = 0; 2302 2303 if (smu->ppt_funcs->set_gfx_cgpg) 2304 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled); 2305 2306 return ret; 2307 } 2308 2309 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed) 2310 { 2311 struct smu_context *smu = handle; 2312 int ret = 0; 2313 2314 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2315 return -EOPNOTSUPP; 2316 2317 if (!smu->ppt_funcs->set_fan_speed_rpm) 2318 return -EOPNOTSUPP; 2319 2320 if (speed == U32_MAX) 2321 return -EINVAL; 2322 2323 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed); 2324 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 2325 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM; 2326 smu->user_dpm_profile.fan_speed_rpm = speed; 2327 2328 /* Override custom PWM setting as they cannot co-exist */ 2329 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM; 2330 smu->user_dpm_profile.fan_speed_pwm = 0; 2331 } 2332 2333 return ret; 2334 } 2335 2336 /** 2337 * smu_get_power_limit - Request one of the SMU Power Limits 2338 * 2339 * @handle: pointer to smu context 2340 * @limit: requested limit is written back to this variable 2341 * @pp_limit_level: &pp_power_limit_level which limit of the power to return 2342 * @pp_power_type: &pp_power_type type of power 2343 * Return: 0 on success, <0 on error 2344 * 2345 */ 2346 int smu_get_power_limit(void *handle, 2347 uint32_t *limit, 2348 enum pp_power_limit_level pp_limit_level, 2349 enum pp_power_type pp_power_type) 2350 { 2351 
struct smu_context *smu = handle; 2352 struct amdgpu_device *adev = smu->adev; 2353 enum smu_ppt_limit_level limit_level; 2354 uint32_t limit_type; 2355 int ret = 0; 2356 2357 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2358 return -EOPNOTSUPP; 2359 2360 switch (pp_power_type) { 2361 case PP_PWR_TYPE_SUSTAINED: 2362 limit_type = SMU_DEFAULT_PPT_LIMIT; 2363 break; 2364 case PP_PWR_TYPE_FAST: 2365 limit_type = SMU_FAST_PPT_LIMIT; 2366 break; 2367 default: 2368 return -EOPNOTSUPP; 2369 break; 2370 } 2371 2372 switch (pp_limit_level) { 2373 case PP_PWR_LIMIT_CURRENT: 2374 limit_level = SMU_PPT_LIMIT_CURRENT; 2375 break; 2376 case PP_PWR_LIMIT_DEFAULT: 2377 limit_level = SMU_PPT_LIMIT_DEFAULT; 2378 break; 2379 case PP_PWR_LIMIT_MAX: 2380 limit_level = SMU_PPT_LIMIT_MAX; 2381 break; 2382 case PP_PWR_LIMIT_MIN: 2383 default: 2384 return -EOPNOTSUPP; 2385 break; 2386 } 2387 2388 if (limit_type != SMU_DEFAULT_PPT_LIMIT) { 2389 if (smu->ppt_funcs->get_ppt_limit) 2390 ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level); 2391 } else { 2392 switch (limit_level) { 2393 case SMU_PPT_LIMIT_CURRENT: 2394 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 2395 case IP_VERSION(13, 0, 2): 2396 case IP_VERSION(11, 0, 7): 2397 case IP_VERSION(11, 0, 11): 2398 case IP_VERSION(11, 0, 12): 2399 case IP_VERSION(11, 0, 13): 2400 ret = smu_get_asic_power_limits(smu, 2401 &smu->current_power_limit, 2402 NULL, 2403 NULL); 2404 break; 2405 default: 2406 break; 2407 } 2408 *limit = smu->current_power_limit; 2409 break; 2410 case SMU_PPT_LIMIT_DEFAULT: 2411 *limit = smu->default_power_limit; 2412 break; 2413 case SMU_PPT_LIMIT_MAX: 2414 *limit = smu->max_power_limit; 2415 break; 2416 default: 2417 break; 2418 } 2419 } 2420 2421 return ret; 2422 } 2423 2424 static int smu_set_power_limit(void *handle, uint32_t limit) 2425 { 2426 struct smu_context *smu = handle; 2427 uint32_t limit_type = limit >> 24; 2428 int ret = 0; 2429 2430 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2431 return -EOPNOTSUPP; 2432 2433 limit &= (1<<24)-1; 2434 if (limit_type != SMU_DEFAULT_PPT_LIMIT) 2435 if (smu->ppt_funcs->set_power_limit) 2436 return smu->ppt_funcs->set_power_limit(smu, limit_type, limit); 2437 2438 if (limit > smu->max_power_limit) { 2439 dev_err(smu->adev->dev, 2440 "New power limit (%d) is over the max allowed %d\n", 2441 limit, smu->max_power_limit); 2442 return -EINVAL; 2443 } 2444 2445 if (!limit) 2446 limit = smu->current_power_limit; 2447 2448 if (smu->ppt_funcs->set_power_limit) { 2449 ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit); 2450 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) 2451 smu->user_dpm_profile.power_limit = limit; 2452 } 2453 2454 return ret; 2455 } 2456 2457 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) 2458 { 2459 int ret = 0; 2460 2461 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2462 return -EOPNOTSUPP; 2463 2464 if (smu->ppt_funcs->print_clk_levels) 2465 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf); 2466 2467 return ret; 2468 } 2469 2470 static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type) 2471 { 2472 enum smu_clk_type clk_type; 2473 2474 switch (type) { 2475 case PP_SCLK: 2476 clk_type = SMU_SCLK; break; 2477 case PP_MCLK: 2478 clk_type = SMU_MCLK; break; 2479 case PP_PCIE: 2480 clk_type = SMU_PCIE; break; 2481 case PP_SOCCLK: 2482 clk_type = SMU_SOCCLK; break; 2483 case PP_FCLK: 2484 clk_type = SMU_FCLK; break; 2485 case PP_DCEFCLK: 
2486 clk_type = SMU_DCEFCLK; break; 2487 case PP_VCLK: 2488 clk_type = SMU_VCLK; break; 2489 case PP_VCLK1: 2490 clk_type = SMU_VCLK1; break; 2491 case PP_DCLK: 2492 clk_type = SMU_DCLK; break; 2493 case PP_DCLK1: 2494 clk_type = SMU_DCLK1; break; 2495 case OD_SCLK: 2496 clk_type = SMU_OD_SCLK; break; 2497 case OD_MCLK: 2498 clk_type = SMU_OD_MCLK; break; 2499 case OD_VDDC_CURVE: 2500 clk_type = SMU_OD_VDDC_CURVE; break; 2501 case OD_RANGE: 2502 clk_type = SMU_OD_RANGE; break; 2503 case OD_VDDGFX_OFFSET: 2504 clk_type = SMU_OD_VDDGFX_OFFSET; break; 2505 case OD_CCLK: 2506 clk_type = SMU_OD_CCLK; break; 2507 case OD_FAN_CURVE: 2508 clk_type = SMU_OD_FAN_CURVE; break; 2509 case OD_ACOUSTIC_LIMIT: 2510 clk_type = SMU_OD_ACOUSTIC_LIMIT; break; 2511 case OD_ACOUSTIC_TARGET: 2512 clk_type = SMU_OD_ACOUSTIC_TARGET; break; 2513 case OD_FAN_TARGET_TEMPERATURE: 2514 clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break; 2515 case OD_FAN_MINIMUM_PWM: 2516 clk_type = SMU_OD_FAN_MINIMUM_PWM; break; 2517 default: 2518 clk_type = SMU_CLK_COUNT; break; 2519 } 2520 2521 return clk_type; 2522 } 2523 2524 static int smu_print_ppclk_levels(void *handle, 2525 enum pp_clock_type type, 2526 char *buf) 2527 { 2528 struct smu_context *smu = handle; 2529 enum smu_clk_type clk_type; 2530 2531 clk_type = smu_convert_to_smuclk(type); 2532 if (clk_type == SMU_CLK_COUNT) 2533 return -EINVAL; 2534 2535 return smu_print_smuclk_levels(smu, clk_type, buf); 2536 } 2537 2538 static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset) 2539 { 2540 struct smu_context *smu = handle; 2541 enum smu_clk_type clk_type; 2542 2543 clk_type = smu_convert_to_smuclk(type); 2544 if (clk_type == SMU_CLK_COUNT) 2545 return -EINVAL; 2546 2547 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2548 return -EOPNOTSUPP; 2549 2550 if (!smu->ppt_funcs->emit_clk_levels) 2551 return -ENOENT; 2552 2553 return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset); 2554 2555 } 2556 2557 static int smu_od_edit_dpm_table(void *handle, 2558 enum PP_OD_DPM_TABLE_COMMAND type, 2559 long *input, uint32_t size) 2560 { 2561 struct smu_context *smu = handle; 2562 int ret = 0; 2563 2564 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2565 return -EOPNOTSUPP; 2566 2567 if (smu->ppt_funcs->od_edit_dpm_table) { 2568 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size); 2569 } 2570 2571 return ret; 2572 } 2573 2574 static int smu_read_sensor(void *handle, 2575 int sensor, 2576 void *data, 2577 int *size_arg) 2578 { 2579 struct smu_context *smu = handle; 2580 struct smu_umd_pstate_table *pstate_table = 2581 &smu->pstate_table; 2582 int ret = 0; 2583 uint32_t *size, size_val; 2584 2585 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2586 return -EOPNOTSUPP; 2587 2588 if (!data || !size_arg) 2589 return -EINVAL; 2590 2591 size_val = *size_arg; 2592 size = &size_val; 2593 2594 if (smu->ppt_funcs->read_sensor) 2595 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size)) 2596 goto unlock; 2597 2598 switch (sensor) { 2599 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK: 2600 *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100; 2601 *size = 4; 2602 break; 2603 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK: 2604 *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100; 2605 *size = 4; 2606 break; 2607 case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK: 2608 *((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100; 2609 *size = 4; 2610 break; 2611 case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK: 2612 *((uint32_t *)data) = 
pstate_table->uclk_pstate.peak * 100; 2613 *size = 4; 2614 break; 2615 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: 2616 ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data); 2617 *size = 8; 2618 break; 2619 case AMDGPU_PP_SENSOR_UVD_POWER: 2620 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0; 2621 *size = 4; 2622 break; 2623 case AMDGPU_PP_SENSOR_VCE_POWER: 2624 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0; 2625 *size = 4; 2626 break; 2627 case AMDGPU_PP_SENSOR_VCN_POWER_STATE: 2628 *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1; 2629 *size = 4; 2630 break; 2631 case AMDGPU_PP_SENSOR_MIN_FAN_RPM: 2632 *(uint32_t *)data = 0; 2633 *size = 4; 2634 break; 2635 default: 2636 *size = 0; 2637 ret = -EOPNOTSUPP; 2638 break; 2639 } 2640 2641 unlock: 2642 // assign uint32_t to int 2643 *size_arg = size_val; 2644 2645 return ret; 2646 } 2647 2648 static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit) 2649 { 2650 int ret = -EINVAL; 2651 struct smu_context *smu = handle; 2652 2653 if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit) 2654 ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit); 2655 2656 return ret; 2657 } 2658 2659 static int smu_set_apu_thermal_limit(void *handle, uint32_t limit) 2660 { 2661 int ret = -EINVAL; 2662 struct smu_context *smu = handle; 2663 2664 if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit) 2665 ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit); 2666 2667 return ret; 2668 } 2669 2670 static int smu_get_power_profile_mode(void *handle, char *buf) 2671 { 2672 struct smu_context *smu = handle; 2673 2674 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || 2675 !smu->ppt_funcs->get_power_profile_mode) 2676 return -EOPNOTSUPP; 2677 if (!buf) 2678 return -EINVAL; 2679 2680 return smu->ppt_funcs->get_power_profile_mode(smu, buf); 2681 } 2682 2683 static int smu_set_power_profile_mode(void *handle, 2684 long *param, 2685 uint32_t param_size) 2686 { 2687 struct smu_context *smu = handle; 2688 2689 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || 2690 !smu->ppt_funcs->set_power_profile_mode) 2691 return -EOPNOTSUPP; 2692 2693 return smu_bump_power_profile_mode(smu, param, param_size); 2694 } 2695 2696 static int smu_get_fan_control_mode(void *handle, u32 *fan_mode) 2697 { 2698 struct smu_context *smu = handle; 2699 2700 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2701 return -EOPNOTSUPP; 2702 2703 if (!smu->ppt_funcs->get_fan_control_mode) 2704 return -EOPNOTSUPP; 2705 2706 if (!fan_mode) 2707 return -EINVAL; 2708 2709 *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu); 2710 2711 return 0; 2712 } 2713 2714 static int smu_set_fan_control_mode(void *handle, u32 value) 2715 { 2716 struct smu_context *smu = handle; 2717 int ret = 0; 2718 2719 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2720 return -EOPNOTSUPP; 2721 2722 if (!smu->ppt_funcs->set_fan_control_mode) 2723 return -EOPNOTSUPP; 2724 2725 if (value == U32_MAX) 2726 return -EINVAL; 2727 2728 ret = smu->ppt_funcs->set_fan_control_mode(smu, value); 2729 if (ret) 2730 goto out; 2731 2732 if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 2733 smu->user_dpm_profile.fan_mode = value; 2734 2735 /* reset user dpm fan speed */ 2736 if (value != AMD_FAN_CTRL_MANUAL) { 2737 smu->user_dpm_profile.fan_speed_pwm = 0; 2738 smu->user_dpm_profile.fan_speed_rpm = 0; 2739 smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | 
SMU_CUSTOM_FAN_SPEED_PWM); 2740 } 2741 } 2742 2743 out: 2744 return ret; 2745 } 2746 2747 static int smu_get_fan_speed_pwm(void *handle, u32 *speed) 2748 { 2749 struct smu_context *smu = handle; 2750 int ret = 0; 2751 2752 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2753 return -EOPNOTSUPP; 2754 2755 if (!smu->ppt_funcs->get_fan_speed_pwm) 2756 return -EOPNOTSUPP; 2757 2758 if (!speed) 2759 return -EINVAL; 2760 2761 ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed); 2762 2763 return ret; 2764 } 2765 2766 static int smu_set_fan_speed_pwm(void *handle, u32 speed) 2767 { 2768 struct smu_context *smu = handle; 2769 int ret = 0; 2770 2771 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2772 return -EOPNOTSUPP; 2773 2774 if (!smu->ppt_funcs->set_fan_speed_pwm) 2775 return -EOPNOTSUPP; 2776 2777 if (speed == U32_MAX) 2778 return -EINVAL; 2779 2780 ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed); 2781 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 2782 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM; 2783 smu->user_dpm_profile.fan_speed_pwm = speed; 2784 2785 /* Override custom RPM setting as they cannot co-exist */ 2786 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM; 2787 smu->user_dpm_profile.fan_speed_rpm = 0; 2788 } 2789 2790 return ret; 2791 } 2792 2793 static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed) 2794 { 2795 struct smu_context *smu = handle; 2796 int ret = 0; 2797 2798 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2799 return -EOPNOTSUPP; 2800 2801 if (!smu->ppt_funcs->get_fan_speed_rpm) 2802 return -EOPNOTSUPP; 2803 2804 if (!speed) 2805 return -EINVAL; 2806 2807 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed); 2808 2809 return ret; 2810 } 2811 2812 static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk) 2813 { 2814 struct smu_context *smu = handle; 2815 2816 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2817 return -EOPNOTSUPP; 2818 2819 return smu_set_min_dcef_deep_sleep(smu, clk); 2820 } 2821 2822 static int smu_get_clock_by_type_with_latency(void *handle, 2823 enum amd_pp_clock_type type, 2824 struct pp_clock_levels_with_latency *clocks) 2825 { 2826 struct smu_context *smu = handle; 2827 enum smu_clk_type clk_type; 2828 int ret = 0; 2829 2830 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2831 return -EOPNOTSUPP; 2832 2833 if (smu->ppt_funcs->get_clock_by_type_with_latency) { 2834 switch (type) { 2835 case amd_pp_sys_clock: 2836 clk_type = SMU_GFXCLK; 2837 break; 2838 case amd_pp_mem_clock: 2839 clk_type = SMU_MCLK; 2840 break; 2841 case amd_pp_dcef_clock: 2842 clk_type = SMU_DCEFCLK; 2843 break; 2844 case amd_pp_disp_clock: 2845 clk_type = SMU_DISPCLK; 2846 break; 2847 default: 2848 dev_err(smu->adev->dev, "Invalid clock type!\n"); 2849 return -EINVAL; 2850 } 2851 2852 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks); 2853 } 2854 2855 return ret; 2856 } 2857 2858 static int smu_display_clock_voltage_request(void *handle, 2859 struct pp_display_clock_request *clock_req) 2860 { 2861 struct smu_context *smu = handle; 2862 int ret = 0; 2863 2864 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2865 return -EOPNOTSUPP; 2866 2867 if (smu->ppt_funcs->display_clock_voltage_request) 2868 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req); 2869 2870 return ret; 2871 } 2872 2873 2874 static int smu_display_disable_memory_clock_switch(void *handle, 2875 bool disable_memory_clock_switch) 2876 { 2877 struct smu_context *smu = handle; 2878 
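/* Default to -EINVAL; only ASICs that implement display_disable_memory_clock_switch can honour the request below. */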
int ret = -EINVAL; 2879 2880 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2881 return -EOPNOTSUPP; 2882 2883 if (smu->ppt_funcs->display_disable_memory_clock_switch) 2884 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch); 2885 2886 return ret; 2887 } 2888 2889 static int smu_set_xgmi_pstate(void *handle, 2890 uint32_t pstate) 2891 { 2892 struct smu_context *smu = handle; 2893 int ret = 0; 2894 2895 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2896 return -EOPNOTSUPP; 2897 2898 if (smu->ppt_funcs->set_xgmi_pstate) 2899 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate); 2900 2901 if (ret) 2902 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n"); 2903 2904 return ret; 2905 } 2906 2907 static int smu_get_baco_capability(void *handle, bool *cap) 2908 { 2909 struct smu_context *smu = handle; 2910 2911 *cap = false; 2912 2913 if (!smu->pm_enabled) 2914 return 0; 2915 2916 if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support) 2917 *cap = smu->ppt_funcs->baco_is_support(smu); 2918 2919 return 0; 2920 } 2921 2922 static int smu_baco_set_state(void *handle, int state) 2923 { 2924 struct smu_context *smu = handle; 2925 int ret = 0; 2926 2927 if (!smu->pm_enabled) 2928 return -EOPNOTSUPP; 2929 2930 if (state == 0) { 2931 if (smu->ppt_funcs->baco_exit) 2932 ret = smu->ppt_funcs->baco_exit(smu); 2933 } else if (state == 1) { 2934 if (smu->ppt_funcs->baco_enter) 2935 ret = smu->ppt_funcs->baco_enter(smu); 2936 } else { 2937 return -EINVAL; 2938 } 2939 2940 if (ret) 2941 dev_err(smu->adev->dev, "Failed to %s BACO state!\n", 2942 (state)?"enter":"exit"); 2943 2944 return ret; 2945 } 2946 2947 bool smu_mode1_reset_is_support(struct smu_context *smu) 2948 { 2949 bool ret = false; 2950 2951 if (!smu->pm_enabled) 2952 return false; 2953 2954 if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support) 2955 ret = smu->ppt_funcs->mode1_reset_is_support(smu); 2956 2957 return ret; 2958 } 2959 2960 bool smu_mode2_reset_is_support(struct smu_context *smu) 2961 { 2962 bool ret = false; 2963 2964 if (!smu->pm_enabled) 2965 return false; 2966 2967 if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support) 2968 ret = smu->ppt_funcs->mode2_reset_is_support(smu); 2969 2970 return ret; 2971 } 2972 2973 int smu_mode1_reset(struct smu_context *smu) 2974 { 2975 int ret = 0; 2976 2977 if (!smu->pm_enabled) 2978 return -EOPNOTSUPP; 2979 2980 if (smu->ppt_funcs->mode1_reset) 2981 ret = smu->ppt_funcs->mode1_reset(smu); 2982 2983 return ret; 2984 } 2985 2986 static int smu_mode2_reset(void *handle) 2987 { 2988 struct smu_context *smu = handle; 2989 int ret = 0; 2990 2991 if (!smu->pm_enabled) 2992 return -EOPNOTSUPP; 2993 2994 if (smu->ppt_funcs->mode2_reset) 2995 ret = smu->ppt_funcs->mode2_reset(smu); 2996 2997 if (ret) 2998 dev_err(smu->adev->dev, "Mode2 reset failed!\n"); 2999 3000 return ret; 3001 } 3002 3003 static int smu_enable_gfx_features(void *handle) 3004 { 3005 struct smu_context *smu = handle; 3006 int ret = 0; 3007 3008 if (!smu->pm_enabled) 3009 return -EOPNOTSUPP; 3010 3011 if (smu->ppt_funcs->enable_gfx_features) 3012 ret = smu->ppt_funcs->enable_gfx_features(smu); 3013 3014 if (ret) 3015 dev_err(smu->adev->dev, "enable gfx features failed!\n"); 3016 3017 return ret; 3018 } 3019 3020 static int smu_get_max_sustainable_clocks_by_dc(void *handle, 3021 struct pp_smu_nv_clock_table *max_clocks) 3022 { 3023 struct smu_context *smu = handle; 3024 int ret = 0; 3025 3026 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3027 return -EOPNOTSUPP; 3028 
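/* Defer to the ASIC-specific callback when one is provided; otherwise leave max_clocks untouched and report success. */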
3029 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc) 3030 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks); 3031 3032 return ret; 3033 } 3034 3035 static int smu_get_uclk_dpm_states(void *handle, 3036 unsigned int *clock_values_in_khz, 3037 unsigned int *num_states) 3038 { 3039 struct smu_context *smu = handle; 3040 int ret = 0; 3041 3042 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3043 return -EOPNOTSUPP; 3044 3045 if (smu->ppt_funcs->get_uclk_dpm_states) 3046 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states); 3047 3048 return ret; 3049 } 3050 3051 static enum amd_pm_state_type smu_get_current_power_state(void *handle) 3052 { 3053 struct smu_context *smu = handle; 3054 enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT; 3055 3056 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3057 return -EOPNOTSUPP; 3058 3059 if (smu->ppt_funcs->get_current_power_state) 3060 pm_state = smu->ppt_funcs->get_current_power_state(smu); 3061 3062 return pm_state; 3063 } 3064 3065 static int smu_get_dpm_clock_table(void *handle, 3066 struct dpm_clocks *clock_table) 3067 { 3068 struct smu_context *smu = handle; 3069 int ret = 0; 3070 3071 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3072 return -EOPNOTSUPP; 3073 3074 if (smu->ppt_funcs->get_dpm_clock_table) 3075 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table); 3076 3077 return ret; 3078 } 3079 3080 static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table) 3081 { 3082 struct smu_context *smu = handle; 3083 3084 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3085 return -EOPNOTSUPP; 3086 3087 if (!smu->ppt_funcs->get_gpu_metrics) 3088 return -EOPNOTSUPP; 3089 3090 return smu->ppt_funcs->get_gpu_metrics(smu, table); 3091 } 3092 3093 static int smu_enable_mgpu_fan_boost(void *handle) 3094 { 3095 struct smu_context *smu = handle; 3096 int ret = 0; 3097 3098 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3099 return -EOPNOTSUPP; 3100 3101 if (smu->ppt_funcs->enable_mgpu_fan_boost) 3102 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu); 3103 3104 return ret; 3105 } 3106 3107 static int smu_gfx_state_change_set(void *handle, 3108 uint32_t state) 3109 { 3110 struct smu_context *smu = handle; 3111 int ret = 0; 3112 3113 if (smu->ppt_funcs->gfx_state_change_set) 3114 ret = smu->ppt_funcs->gfx_state_change_set(smu, state); 3115 3116 return ret; 3117 } 3118 3119 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable) 3120 { 3121 int ret = 0; 3122 3123 if (smu->ppt_funcs->smu_handle_passthrough_sbr) 3124 ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable); 3125 3126 return ret; 3127 } 3128 3129 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc) 3130 { 3131 int ret = -EOPNOTSUPP; 3132 3133 if (smu->ppt_funcs && 3134 smu->ppt_funcs->get_ecc_info) 3135 ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc); 3136 3137 return ret; 3138 3139 } 3140 3141 static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size) 3142 { 3143 struct smu_context *smu = handle; 3144 struct smu_table_context *smu_table = &smu->smu_table; 3145 struct smu_table *memory_pool = &smu_table->memory_pool; 3146 3147 if (!addr || !size) 3148 return -EINVAL; 3149 3150 *addr = NULL; 3151 *size = 0; 3152 if (memory_pool->bo) { 3153 *addr = memory_pool->cpu_addr; 3154 *size = memory_pool->size; 3155 } 3156 3157 return 0; 3158 } 3159 3160 int smu_set_xgmi_plpd_mode(struct smu_context *smu, 3161 enum pp_xgmi_plpd_mode mode) 3162 { 3163 int ret = -EOPNOTSUPP; 
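/* Reject the request while DPM is disabled or when per-link power down is unsupported (XGMI_PLPD_NONE); asking for the already-active mode is a no-op. The new mode is cached only after the ASIC callback succeeds. */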
3164 3165 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3166 return ret; 3167 3168 /* PLPD policy is not supported if it's NONE */ 3169 if (smu->plpd_mode == XGMI_PLPD_NONE) 3170 return ret; 3171 3172 if (smu->plpd_mode == mode) 3173 return 0; 3174 3175 if (smu->ppt_funcs && smu->ppt_funcs->select_xgmi_plpd_policy) 3176 ret = smu->ppt_funcs->select_xgmi_plpd_policy(smu, mode); 3177 3178 if (!ret) 3179 smu->plpd_mode = mode; 3180 3181 return ret; 3182 } 3183 3184 static const struct amd_pm_funcs swsmu_pm_funcs = { 3185 /* export for sysfs */ 3186 .set_fan_control_mode = smu_set_fan_control_mode, 3187 .get_fan_control_mode = smu_get_fan_control_mode, 3188 .set_fan_speed_pwm = smu_set_fan_speed_pwm, 3189 .get_fan_speed_pwm = smu_get_fan_speed_pwm, 3190 .force_clock_level = smu_force_ppclk_levels, 3191 .print_clock_levels = smu_print_ppclk_levels, 3192 .emit_clock_levels = smu_emit_ppclk_levels, 3193 .force_performance_level = smu_force_performance_level, 3194 .read_sensor = smu_read_sensor, 3195 .get_apu_thermal_limit = smu_get_apu_thermal_limit, 3196 .set_apu_thermal_limit = smu_set_apu_thermal_limit, 3197 .get_performance_level = smu_get_performance_level, 3198 .get_current_power_state = smu_get_current_power_state, 3199 .get_fan_speed_rpm = smu_get_fan_speed_rpm, 3200 .set_fan_speed_rpm = smu_set_fan_speed_rpm, 3201 .get_pp_num_states = smu_get_power_num_states, 3202 .get_pp_table = smu_sys_get_pp_table, 3203 .set_pp_table = smu_sys_set_pp_table, 3204 .switch_power_profile = smu_switch_power_profile, 3205 /* export to amdgpu */ 3206 .dispatch_tasks = smu_handle_dpm_task, 3207 .load_firmware = smu_load_microcode, 3208 .set_powergating_by_smu = smu_dpm_set_power_gate, 3209 .set_power_limit = smu_set_power_limit, 3210 .get_power_limit = smu_get_power_limit, 3211 .get_power_profile_mode = smu_get_power_profile_mode, 3212 .set_power_profile_mode = smu_set_power_profile_mode, 3213 .odn_edit_dpm_table = smu_od_edit_dpm_table, 3214 .set_mp1_state = smu_set_mp1_state, 3215 .gfx_state_change_set = smu_gfx_state_change_set, 3216 /* export to DC */ 3217 .get_sclk = smu_get_sclk, 3218 .get_mclk = smu_get_mclk, 3219 .display_configuration_change = smu_display_configuration_change, 3220 .get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency, 3221 .display_clock_voltage_request = smu_display_clock_voltage_request, 3222 .enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost, 3223 .set_active_display_count = smu_set_display_count, 3224 .set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk, 3225 .get_asic_baco_capability = smu_get_baco_capability, 3226 .set_asic_baco_state = smu_baco_set_state, 3227 .get_ppfeature_status = smu_sys_get_pp_feature_mask, 3228 .set_ppfeature_status = smu_sys_set_pp_feature_mask, 3229 .asic_reset_mode_2 = smu_mode2_reset, 3230 .asic_reset_enable_gfx_features = smu_enable_gfx_features, 3231 .set_df_cstate = smu_set_df_cstate, 3232 .set_xgmi_pstate = smu_set_xgmi_pstate, 3233 .get_gpu_metrics = smu_sys_get_gpu_metrics, 3234 .set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges, 3235 .display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch, 3236 .get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc, 3237 .get_uclk_dpm_states = smu_get_uclk_dpm_states, 3238 .get_dpm_clock_table = smu_get_dpm_clock_table, 3239 .get_smu_prv_buf_details = smu_get_prv_buffer_details, 3240 }; 3241 3242 int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event, 3243 uint64_t event_arg) 3244 { 3245 int ret = 
-EINVAL; 3246 3247 if (smu->ppt_funcs->wait_for_event) 3248 ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg); 3249 3250 return ret; 3251 } 3252 3253 int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size) 3254 { 3255 3256 if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled) 3257 return -EOPNOTSUPP; 3258 3259 /* Confirm the allocated buffer is of the correct size */ 3260 if (size != smu->stb_context.stb_buf_size) 3261 return -EINVAL; 3262 3263 /* 3264 * No need to lock the smu mutex as we access the STB directly through MMIO 3265 * and do not go through the SMU messaging route (for now at least). 3266 * Register accesses rely on the implementation's internal locking. 3267 */ 3268 return smu->ppt_funcs->stb_collect_info(smu, buf, size); 3269 } 3270 3271 #if defined(CONFIG_DEBUG_FS) 3272 3273 static int smu_stb_debugfs_open(struct inode *inode, struct file *filp) 3274 { 3275 struct amdgpu_device *adev = filp->f_inode->i_private; 3276 struct smu_context *smu = adev->powerplay.pp_handle; 3277 unsigned char *buf; 3278 int r; 3279 3280 buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL); 3281 if (!buf) 3282 return -ENOMEM; 3283 3284 r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size); 3285 if (r) 3286 goto out; 3287 3288 filp->private_data = buf; 3289 3290 return 0; 3291 3292 out: 3293 kvfree(buf); 3294 return r; 3295 } 3296 3297 static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size, 3298 loff_t *pos) 3299 { 3300 struct amdgpu_device *adev = filp->f_inode->i_private; 3301 struct smu_context *smu = adev->powerplay.pp_handle; 3302 3303 3304 if (!filp->private_data) 3305 return -EINVAL; 3306 3307 return simple_read_from_buffer(buf, 3308 size, 3309 pos, filp->private_data, 3310 smu->stb_context.stb_buf_size); 3311 } 3312 3313 static int smu_stb_debugfs_release(struct inode *inode, struct file *filp) 3314 { 3315 kvfree(filp->private_data); 3316 filp->private_data = NULL; 3317 3318 return 0; 3319 } 3320 3321 /* 3322 * We have to define not only the read method but also 3323 * open and release, because .read returns at most PAGE_SIZE of 3324 * data per call and so is invoked multiple times.
3325 * We allocate the STB buffer in .open and release it 3326 * in .release 3327 */ 3328 static const struct file_operations smu_stb_debugfs_fops = { 3329 .owner = THIS_MODULE, 3330 .open = smu_stb_debugfs_open, 3331 .read = smu_stb_debugfs_read, 3332 .release = smu_stb_debugfs_release, 3333 .llseek = default_llseek, 3334 }; 3335 3336 #endif 3337 3338 void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev) 3339 { 3340 #if defined(CONFIG_DEBUG_FS) 3341 3342 struct smu_context *smu = adev->powerplay.pp_handle; 3343 3344 if (!smu || (!smu->stb_context.stb_buf_size)) 3345 return; 3346 3347 debugfs_create_file_size("amdgpu_smu_stb_dump", 3348 S_IRUSR, 3349 adev_to_drm(adev)->primary->debugfs_root, 3350 adev, 3351 &smu_stb_debugfs_fops, 3352 smu->stb_context.stb_buf_size); 3353 #endif 3354 } 3355 3356 int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size) 3357 { 3358 int ret = 0; 3359 3360 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num) 3361 ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size); 3362 3363 return ret; 3364 } 3365 3366 int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size) 3367 { 3368 int ret = 0; 3369 3370 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag) 3371 ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size); 3372 3373 return ret; 3374 } 3375
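/*
 * Illustrative note, not part of the upstream file: callers in amdgpu are
 * expected to reach the handlers exported in swsmu_pm_funcs above through
 * the amd_pm_funcs table, e.g. roughly
 *   pp_funcs->get_power_limit(pp_handle, &limit,
 *                             PP_PWR_LIMIT_CURRENT, PP_PWR_TYPE_SUSTAINED);
 * where pp_funcs and pp_handle come from the device's powerplay context.
 */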