1 /* 2 * Copyright 2018 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include <linux/delay.h> 25 #include <linux/module.h> 26 #include <linux/slab.h> 27 28 #include "hwmgr.h" 29 #include "amd_powerplay.h" 30 #include "vega20_smumgr.h" 31 #include "hardwaremanager.h" 32 #include "ppatomfwctrl.h" 33 #include "atomfirmware.h" 34 #include "cgs_common.h" 35 #include "vega20_powertune.h" 36 #include "vega20_inc.h" 37 #include "pppcielanes.h" 38 #include "vega20_hwmgr.h" 39 #include "vega20_processpptables.h" 40 #include "vega20_pptable.h" 41 #include "vega20_thermal.h" 42 #include "vega20_ppsmc.h" 43 #include "pp_debug.h" 44 #include "amd_pcie_helpers.h" 45 #include "ppinterrupt.h" 46 #include "pp_overdriver.h" 47 #include "pp_thermal.h" 48 #include "soc15_common.h" 49 #include "vega20_baco.h" 50 #include "smuio/smuio_9_0_offset.h" 51 #include "smuio/smuio_9_0_sh_mask.h" 52 #include "nbio/nbio_7_4_sh_mask.h" 53 54 #define smnPCIE_LC_SPEED_CNTL 0x11140290 55 #define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 56 57 #define LINK_WIDTH_MAX 6 58 #define LINK_SPEED_MAX 3 59 static const int link_width[] = {0, 1, 2, 4, 8, 12, 16}; 60 static const int link_speed[] = {25, 50, 80, 160}; 61 62 static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) 63 { 64 struct vega20_hwmgr *data = 65 (struct vega20_hwmgr *)(hwmgr->backend); 66 67 data->gfxclk_average_alpha = PPVEGA20_VEGA20GFXCLKAVERAGEALPHA_DFLT; 68 data->socclk_average_alpha = PPVEGA20_VEGA20SOCCLKAVERAGEALPHA_DFLT; 69 data->uclk_average_alpha = PPVEGA20_VEGA20UCLKCLKAVERAGEALPHA_DFLT; 70 data->gfx_activity_average_alpha = PPVEGA20_VEGA20GFXACTIVITYAVERAGEALPHA_DFLT; 71 data->lowest_uclk_reserved_for_ulv = PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT; 72 73 data->display_voltage_mode = PPVEGA20_VEGA20DISPLAYVOLTAGEMODE_DFLT; 74 data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; 75 data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; 76 data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; 77 data->disp_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; 78 data->disp_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; 79 data->disp_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; 80 data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; 81 data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; 82 data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; 83 data->phy_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; 84 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; 85 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; 86 87 /* 88 * Disable the following features for now: 89 * GFXCLK DS 90 * SOCLK DS 91 * LCLK DS 92 * DCEFCLK DS 93 * FCLK DS 94 * MP1CLK DS 95 * MP0CLK DS 96 */ 97 data->registry_data.disallowed_features = 0xE0041C00; 98 /* ECC feature should be disabled on old SMUs */ 99 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version); 100 if (hwmgr->smu_version < 0x282100) 101 data->registry_data.disallowed_features |= FEATURE_ECC_MASK; 102 103 if (!(hwmgr->feature_mask & PP_PCIE_DPM_MASK)) 104 data->registry_data.disallowed_features |= FEATURE_DPM_LINK_MASK; 105 106 if (!(hwmgr->feature_mask & PP_SCLK_DPM_MASK)) 107 data->registry_data.disallowed_features |= FEATURE_DPM_GFXCLK_MASK; 108 109 if (!(hwmgr->feature_mask & PP_SOCCLK_DPM_MASK)) 110 data->registry_data.disallowed_features |= FEATURE_DPM_SOCCLK_MASK; 111 112 if (!(hwmgr->feature_mask & PP_MCLK_DPM_MASK)) 113 data->registry_data.disallowed_features |= FEATURE_DPM_UCLK_MASK; 114 115 if (!(hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK)) 116 data->registry_data.disallowed_features |= FEATURE_DPM_DCEFCLK_MASK; 117 118 if (!(hwmgr->feature_mask & PP_ULV_MASK)) 119 data->registry_data.disallowed_features |= FEATURE_ULV_MASK; 120 121 if (!(hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK)) 122 data->registry_data.disallowed_features |= FEATURE_DS_GFXCLK_MASK; 123 124 data->registry_data.od_state_in_dc_support = 0; 125 data->registry_data.thermal_support = 1; 126 data->registry_data.skip_baco_hardware = 0; 127 128 data->registry_data.log_avfs_param = 0; 129 data->registry_data.sclk_throttle_low_notification = 1; 130 data->registry_data.force_dpm_high = 0; 131 data->registry_data.stable_pstate_sclk_dpm_percentage = 75; 132 133 data->registry_data.didt_support = 0; 134 if (data->registry_data.didt_support) { 135 data->registry_data.didt_mode = 6; 136 data->registry_data.sq_ramping_support = 1; 137 data->registry_data.db_ramping_support = 0; 138 data->registry_data.td_ramping_support = 0; 139 data->registry_data.tcp_ramping_support = 0; 140 data->registry_data.dbr_ramping_support = 0; 141 data->registry_data.edc_didt_support = 1; 142 data->registry_data.gc_didt_support = 0; 143 data->registry_data.psm_didt_support = 0; 144 } 145 146 data->registry_data.pcie_lane_override = 0xff; 147 data->registry_data.pcie_speed_override = 0xff; 148 data->registry_data.pcie_clock_override = 0xffffffff; 149 data->registry_data.regulator_hot_gpio_support = 1; 150 data->registry_data.ac_dc_switch_gpio_support = 0; 151 data->registry_data.quick_transition_support = 0; 152 data->registry_data.zrpm_start_temp = 0xffff; 153 data->registry_data.zrpm_stop_temp = 0xffff; 154 data->registry_data.od8_feature_enable = 1; 155 data->registry_data.disable_water_mark = 0; 156 data->registry_data.disable_pp_tuning = 0; 157 data->registry_data.disable_xlpp_tuning = 0; 158 data->registry_data.disable_workload_policy = 0; 159 data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F; 160 data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919; 161 data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A; 162 data->registry_data.force_workload_policy_mask = 0; 163 data->registry_data.disable_3d_fs_detection = 0; 164 data->registry_data.fps_support = 1; 165 data->registry_data.disable_auto_wattman = 1; 166 data->registry_data.auto_wattman_debug = 0; 167 data->registry_data.auto_wattman_sample_period = 100; 168 data->registry_data.fclk_gfxclk_ratio = 0; 169 data->registry_data.auto_wattman_threshold = 50; 170 data->registry_data.gfxoff_controlled_by_driver = 1; 171 data->gfxoff_allowed = false; 172 data->counter_gfxoff = 0; 173 data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK); 174 } 175 176 static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr) 177 { 178 struct vega20_hwmgr *data = 179 (struct vega20_hwmgr *)(hwmgr->backend); 180 struct amdgpu_device *adev = hwmgr->adev; 181 182 if (data->vddci_control == VEGA20_VOLTAGE_CONTROL_NONE) 183 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 184 PHM_PlatformCaps_ControlVDDCI); 185 186 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 187 PHM_PlatformCaps_TablelessHardwareInterface); 188 189 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 190 PHM_PlatformCaps_BACO); 191 192 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 193 PHM_PlatformCaps_EnableSMU7ThermalManagement); 194 195 if (adev->pg_flags & AMD_PG_SUPPORT_UVD) 196 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 197 PHM_PlatformCaps_UVDPowerGating); 198 199 if (adev->pg_flags & AMD_PG_SUPPORT_VCE) 200 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 201 PHM_PlatformCaps_VCEPowerGating); 202 203 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 204 PHM_PlatformCaps_UnTabledHardwareInterface); 205 206 if (data->registry_data.od8_feature_enable) 207 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 208 PHM_PlatformCaps_OD8inACSupport); 209 210 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 211 PHM_PlatformCaps_ActivityReporting); 212 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 213 PHM_PlatformCaps_FanSpeedInTableIsRPM); 214 215 if (data->registry_data.od_state_in_dc_support) { 216 if (data->registry_data.od8_feature_enable) 217 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 218 PHM_PlatformCaps_OD8inDCSupport); 219 } 220 221 if (data->registry_data.thermal_support && 222 data->registry_data.fuzzy_fan_control_support && 223 hwmgr->thermal_controller.advanceFanControlParameters.usTMax) 224 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 225 PHM_PlatformCaps_ODFuzzyFanControlSupport); 226 227 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 228 PHM_PlatformCaps_DynamicPowerManagement); 229 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 230 PHM_PlatformCaps_SMC); 231 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 232 PHM_PlatformCaps_ThermalPolicyDelay); 233 234 if (data->registry_data.force_dpm_high) 235 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 236 PHM_PlatformCaps_ExclusiveModeAlwaysHigh); 237 238 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 239 PHM_PlatformCaps_DynamicUVDState); 240 241 if (data->registry_data.sclk_throttle_low_notification) 242 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 243 PHM_PlatformCaps_SclkThrottleLowNotification); 244 245 /* power tune caps */ 246 /* assume disabled */ 247 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 248 PHM_PlatformCaps_PowerContainment); 249 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 250 PHM_PlatformCaps_DiDtSupport); 251 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 252 PHM_PlatformCaps_SQRamping); 253 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 254 PHM_PlatformCaps_DBRamping); 255 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 256 PHM_PlatformCaps_TDRamping); 257 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 258 PHM_PlatformCaps_TCPRamping); 259 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 260 PHM_PlatformCaps_DBRRamping); 261 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 262 PHM_PlatformCaps_DiDtEDCEnable); 263 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 264 PHM_PlatformCaps_GCEDC); 265 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 266 PHM_PlatformCaps_PSM); 267 268 if (data->registry_data.didt_support) { 269 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 270 PHM_PlatformCaps_DiDtSupport); 271 if (data->registry_data.sq_ramping_support) 272 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 273 PHM_PlatformCaps_SQRamping); 274 if (data->registry_data.db_ramping_support) 275 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 276 PHM_PlatformCaps_DBRamping); 277 if (data->registry_data.td_ramping_support) 278 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 279 PHM_PlatformCaps_TDRamping); 280 if (data->registry_data.tcp_ramping_support) 281 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 282 PHM_PlatformCaps_TCPRamping); 283 if (data->registry_data.dbr_ramping_support) 284 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 285 PHM_PlatformCaps_DBRRamping); 286 if (data->registry_data.edc_didt_support) 287 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 288 PHM_PlatformCaps_DiDtEDCEnable); 289 if (data->registry_data.gc_didt_support) 290 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 291 PHM_PlatformCaps_GCEDC); 292 if (data->registry_data.psm_didt_support) 293 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 294 PHM_PlatformCaps_PSM); 295 } 296 297 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 298 PHM_PlatformCaps_RegulatorHot); 299 300 if (data->registry_data.ac_dc_switch_gpio_support) { 301 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 302 PHM_PlatformCaps_AutomaticDCTransition); 303 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 304 PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme); 305 } 306 307 if (data->registry_data.quick_transition_support) { 308 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 309 PHM_PlatformCaps_AutomaticDCTransition); 310 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 311 PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme); 312 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 313 PHM_PlatformCaps_Falcon_QuickTransition); 314 } 315 316 if (data->lowest_uclk_reserved_for_ulv != PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT) { 317 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 318 PHM_PlatformCaps_LowestUclkReservedForUlv); 319 if (data->lowest_uclk_reserved_for_ulv == 1) 320 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 321 PHM_PlatformCaps_LowestUclkReservedForUlv); 322 } 323 324 if (data->registry_data.custom_fan_support) 325 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 326 PHM_PlatformCaps_CustomFanControlSupport); 327 328 return 0; 329 } 330 331 static int vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr) 332 { 333 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 334 struct amdgpu_device *adev = hwmgr->adev; 335 uint32_t top32, bottom32; 336 int i, ret; 337 338 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id = 339 FEATURE_DPM_PREFETCHER_BIT; 340 data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id = 341 FEATURE_DPM_GFXCLK_BIT; 342 data->smu_features[GNLD_DPM_UCLK].smu_feature_id = 343 FEATURE_DPM_UCLK_BIT; 344 data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id = 345 FEATURE_DPM_SOCCLK_BIT; 346 data->smu_features[GNLD_DPM_UVD].smu_feature_id = 347 FEATURE_DPM_UVD_BIT; 348 data->smu_features[GNLD_DPM_VCE].smu_feature_id = 349 FEATURE_DPM_VCE_BIT; 350 data->smu_features[GNLD_ULV].smu_feature_id = 351 FEATURE_ULV_BIT; 352 data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id = 353 FEATURE_DPM_MP0CLK_BIT; 354 data->smu_features[GNLD_DPM_LINK].smu_feature_id = 355 FEATURE_DPM_LINK_BIT; 356 data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id = 357 FEATURE_DPM_DCEFCLK_BIT; 358 data->smu_features[GNLD_DS_GFXCLK].smu_feature_id = 359 FEATURE_DS_GFXCLK_BIT; 360 data->smu_features[GNLD_DS_SOCCLK].smu_feature_id = 361 FEATURE_DS_SOCCLK_BIT; 362 data->smu_features[GNLD_DS_LCLK].smu_feature_id = 363 FEATURE_DS_LCLK_BIT; 364 data->smu_features[GNLD_PPT].smu_feature_id = 365 FEATURE_PPT_BIT; 366 data->smu_features[GNLD_TDC].smu_feature_id = 367 FEATURE_TDC_BIT; 368 data->smu_features[GNLD_THERMAL].smu_feature_id = 369 FEATURE_THERMAL_BIT; 370 data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id = 371 FEATURE_GFX_PER_CU_CG_BIT; 372 data->smu_features[GNLD_RM].smu_feature_id = 373 FEATURE_RM_BIT; 374 data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id = 375 FEATURE_DS_DCEFCLK_BIT; 376 data->smu_features[GNLD_ACDC].smu_feature_id = 377 FEATURE_ACDC_BIT; 378 data->smu_features[GNLD_VR0HOT].smu_feature_id = 379 FEATURE_VR0HOT_BIT; 380 data->smu_features[GNLD_VR1HOT].smu_feature_id = 381 FEATURE_VR1HOT_BIT; 382 data->smu_features[GNLD_FW_CTF].smu_feature_id = 383 FEATURE_FW_CTF_BIT; 384 data->smu_features[GNLD_LED_DISPLAY].smu_feature_id = 385 FEATURE_LED_DISPLAY_BIT; 386 data->smu_features[GNLD_FAN_CONTROL].smu_feature_id = 387 FEATURE_FAN_CONTROL_BIT; 388 data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT; 389 data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT; 390 data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT; 391 data->smu_features[GNLD_DPM_FCLK].smu_feature_id = FEATURE_DPM_FCLK_BIT; 392 data->smu_features[GNLD_DS_FCLK].smu_feature_id = FEATURE_DS_FCLK_BIT; 393 data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT; 394 data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT; 395 data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT; 396 data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT; 397 398 for (i = 0; i < GNLD_FEATURES_MAX; i++) { 399 data->smu_features[i].smu_feature_bitmap = 400 (uint64_t)(1ULL << data->smu_features[i].smu_feature_id); 401 data->smu_features[i].allowed = 402 ((data->registry_data.disallowed_features >> i) & 1) ? 403 false : true; 404 } 405 406 /* Get the SN to turn into a Unique ID */ 407 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); 408 if (ret) 409 return ret; 410 411 ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); 412 if (ret) 413 return ret; 414 415 adev->unique_id = ((uint64_t)bottom32 << 32) | top32; 416 417 return 0; 418 } 419 420 static int vega20_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr) 421 { 422 return 0; 423 } 424 425 static int vega20_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) 426 { 427 kfree(hwmgr->backend); 428 hwmgr->backend = NULL; 429 430 return 0; 431 } 432 433 static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr) 434 { 435 struct vega20_hwmgr *data; 436 struct amdgpu_device *adev = hwmgr->adev; 437 int result; 438 439 data = kzalloc(sizeof(struct vega20_hwmgr), GFP_KERNEL); 440 if (data == NULL) 441 return -ENOMEM; 442 443 hwmgr->backend = data; 444 445 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; 446 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 447 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 448 449 vega20_set_default_registry_data(hwmgr); 450 451 data->disable_dpm_mask = 0xff; 452 453 /* need to set voltage control types before EVV patching */ 454 data->vddc_control = VEGA20_VOLTAGE_CONTROL_NONE; 455 data->mvdd_control = VEGA20_VOLTAGE_CONTROL_NONE; 456 data->vddci_control = VEGA20_VOLTAGE_CONTROL_NONE; 457 458 data->water_marks_bitmap = 0; 459 data->avfs_exist = false; 460 461 vega20_set_features_platform_caps(hwmgr); 462 463 result = vega20_init_dpm_defaults(hwmgr); 464 if (result) { 465 pr_err("%s failed\n", __func__); 466 return result; 467 } 468 /* Parse pptable data read from VBIOS */ 469 vega20_set_private_data_based_on_pptable(hwmgr); 470 471 data->is_tlu_enabled = false; 472 473 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = 474 VEGA20_MAX_HARDWARE_POWERLEVELS; 475 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; 476 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; 477 478 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ 479 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */ 480 hwmgr->platform_descriptor.clockStep.engineClock = 500; 481 hwmgr->platform_descriptor.clockStep.memoryClock = 500; 482 483 data->total_active_cus = adev->gfx.cu_info.number; 484 data->is_custom_profile_set = false; 485 486 return 0; 487 } 488 489 static int vega20_init_sclk_threshold(struct pp_hwmgr *hwmgr) 490 { 491 struct vega20_hwmgr *data = 492 (struct vega20_hwmgr *)(hwmgr->backend); 493 494 data->low_sclk_interrupt_threshold = 0; 495 496 return 0; 497 } 498 499 static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr) 500 { 501 struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); 502 int ret = 0; 503 bool use_baco = (amdgpu_in_reset(adev) && 504 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) || 505 (adev->in_runpm && amdgpu_asic_supports_baco(adev)); 506 507 ret = vega20_init_sclk_threshold(hwmgr); 508 PP_ASSERT_WITH_CODE(!ret, 509 "Failed to init sclk threshold!", 510 return ret); 511 512 if (use_baco) { 513 ret = vega20_baco_apply_vdci_flush_workaround(hwmgr); 514 if (ret) 515 pr_err("Failed to apply vega20 baco workaround!\n"); 516 } 517 518 return ret; 519 } 520 521 /* 522 * @fn vega20_init_dpm_state 523 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff. 524 * 525 * @param dpm_state - the address of the DPM Table to initiailize. 526 * @return None. 527 */ 528 static void vega20_init_dpm_state(struct vega20_dpm_state *dpm_state) 529 { 530 dpm_state->soft_min_level = 0x0; 531 dpm_state->soft_max_level = VG20_CLOCK_MAX_DEFAULT; 532 dpm_state->hard_min_level = 0x0; 533 dpm_state->hard_max_level = VG20_CLOCK_MAX_DEFAULT; 534 } 535 536 static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr, 537 PPCLK_e clk_id, uint32_t *num_of_levels) 538 { 539 int ret = 0; 540 541 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 542 PPSMC_MSG_GetDpmFreqByIndex, 543 (clk_id << 16 | 0xFF), 544 num_of_levels); 545 PP_ASSERT_WITH_CODE(!ret, 546 "[GetNumOfDpmLevel] failed to get dpm levels!", 547 return ret); 548 549 return ret; 550 } 551 552 static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr, 553 PPCLK_e clk_id, uint32_t index, uint32_t *clk) 554 { 555 int ret = 0; 556 557 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 558 PPSMC_MSG_GetDpmFreqByIndex, 559 (clk_id << 16 | index), 560 clk); 561 PP_ASSERT_WITH_CODE(!ret, 562 "[GetDpmFreqByIndex] failed to get dpm freq by index!", 563 return ret); 564 565 return ret; 566 } 567 568 static int vega20_setup_single_dpm_table(struct pp_hwmgr *hwmgr, 569 struct vega20_single_dpm_table *dpm_table, PPCLK_e clk_id) 570 { 571 int ret = 0; 572 uint32_t i, num_of_levels, clk; 573 574 ret = vega20_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels); 575 PP_ASSERT_WITH_CODE(!ret, 576 "[SetupSingleDpmTable] failed to get clk levels!", 577 return ret); 578 579 dpm_table->count = num_of_levels; 580 581 for (i = 0; i < num_of_levels; i++) { 582 ret = vega20_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk); 583 PP_ASSERT_WITH_CODE(!ret, 584 "[SetupSingleDpmTable] failed to get clk of specific level!", 585 return ret); 586 dpm_table->dpm_levels[i].value = clk; 587 dpm_table->dpm_levels[i].enabled = true; 588 } 589 590 return ret; 591 } 592 593 static int vega20_setup_gfxclk_dpm_table(struct pp_hwmgr *hwmgr) 594 { 595 struct vega20_hwmgr *data = 596 (struct vega20_hwmgr *)(hwmgr->backend); 597 struct vega20_single_dpm_table *dpm_table; 598 int ret = 0; 599 600 dpm_table = &(data->dpm_table.gfx_table); 601 if (data->smu_features[GNLD_DPM_GFXCLK].enabled) { 602 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK); 603 PP_ASSERT_WITH_CODE(!ret, 604 "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!", 605 return ret); 606 } else { 607 dpm_table->count = 1; 608 dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100; 609 } 610 611 return ret; 612 } 613 614 static int vega20_setup_memclk_dpm_table(struct pp_hwmgr *hwmgr) 615 { 616 struct vega20_hwmgr *data = 617 (struct vega20_hwmgr *)(hwmgr->backend); 618 struct vega20_single_dpm_table *dpm_table; 619 int ret = 0; 620 621 dpm_table = &(data->dpm_table.mem_table); 622 if (data->smu_features[GNLD_DPM_UCLK].enabled) { 623 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK); 624 PP_ASSERT_WITH_CODE(!ret, 625 "[SetupDefaultDpmTable] failed to get memclk dpm levels!", 626 return ret); 627 } else { 628 dpm_table->count = 1; 629 dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100; 630 } 631 632 return ret; 633 } 634 635 /* 636 * This function is to initialize all DPM state tables 637 * for SMU based on the dependency table. 638 * Dynamic state patching function will then trim these 639 * state tables to the allowed range based 640 * on the power policy or external client requests, 641 * such as UVD request, etc. 642 */ 643 static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) 644 { 645 struct vega20_hwmgr *data = 646 (struct vega20_hwmgr *)(hwmgr->backend); 647 struct vega20_single_dpm_table *dpm_table; 648 int ret = 0; 649 650 memset(&data->dpm_table, 0, sizeof(data->dpm_table)); 651 652 /* socclk */ 653 dpm_table = &(data->dpm_table.soc_table); 654 if (data->smu_features[GNLD_DPM_SOCCLK].enabled) { 655 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK); 656 PP_ASSERT_WITH_CODE(!ret, 657 "[SetupDefaultDpmTable] failed to get socclk dpm levels!", 658 return ret); 659 } else { 660 dpm_table->count = 1; 661 dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100; 662 } 663 vega20_init_dpm_state(&(dpm_table->dpm_state)); 664 665 /* gfxclk */ 666 dpm_table = &(data->dpm_table.gfx_table); 667 ret = vega20_setup_gfxclk_dpm_table(hwmgr); 668 if (ret) 669 return ret; 670 vega20_init_dpm_state(&(dpm_table->dpm_state)); 671 672 /* memclk */ 673 dpm_table = &(data->dpm_table.mem_table); 674 ret = vega20_setup_memclk_dpm_table(hwmgr); 675 if (ret) 676 return ret; 677 vega20_init_dpm_state(&(dpm_table->dpm_state)); 678 679 /* eclk */ 680 dpm_table = &(data->dpm_table.eclk_table); 681 if (data->smu_features[GNLD_DPM_VCE].enabled) { 682 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK); 683 PP_ASSERT_WITH_CODE(!ret, 684 "[SetupDefaultDpmTable] failed to get eclk dpm levels!", 685 return ret); 686 } else { 687 dpm_table->count = 1; 688 dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100; 689 } 690 vega20_init_dpm_state(&(dpm_table->dpm_state)); 691 692 /* vclk */ 693 dpm_table = &(data->dpm_table.vclk_table); 694 if (data->smu_features[GNLD_DPM_UVD].enabled) { 695 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK); 696 PP_ASSERT_WITH_CODE(!ret, 697 "[SetupDefaultDpmTable] failed to get vclk dpm levels!", 698 return ret); 699 } else { 700 dpm_table->count = 1; 701 dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100; 702 } 703 vega20_init_dpm_state(&(dpm_table->dpm_state)); 704 705 /* dclk */ 706 dpm_table = &(data->dpm_table.dclk_table); 707 if (data->smu_features[GNLD_DPM_UVD].enabled) { 708 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK); 709 PP_ASSERT_WITH_CODE(!ret, 710 "[SetupDefaultDpmTable] failed to get dclk dpm levels!", 711 return ret); 712 } else { 713 dpm_table->count = 1; 714 dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100; 715 } 716 vega20_init_dpm_state(&(dpm_table->dpm_state)); 717 718 /* dcefclk */ 719 dpm_table = &(data->dpm_table.dcef_table); 720 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { 721 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK); 722 PP_ASSERT_WITH_CODE(!ret, 723 "[SetupDefaultDpmTable] failed to get dcefclk dpm levels!", 724 return ret); 725 } else { 726 dpm_table->count = 1; 727 dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100; 728 } 729 vega20_init_dpm_state(&(dpm_table->dpm_state)); 730 731 /* pixclk */ 732 dpm_table = &(data->dpm_table.pixel_table); 733 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { 734 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK); 735 PP_ASSERT_WITH_CODE(!ret, 736 "[SetupDefaultDpmTable] failed to get pixclk dpm levels!", 737 return ret); 738 } else 739 dpm_table->count = 0; 740 vega20_init_dpm_state(&(dpm_table->dpm_state)); 741 742 /* dispclk */ 743 dpm_table = &(data->dpm_table.display_table); 744 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { 745 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK); 746 PP_ASSERT_WITH_CODE(!ret, 747 "[SetupDefaultDpmTable] failed to get dispclk dpm levels!", 748 return ret); 749 } else 750 dpm_table->count = 0; 751 vega20_init_dpm_state(&(dpm_table->dpm_state)); 752 753 /* phyclk */ 754 dpm_table = &(data->dpm_table.phy_table); 755 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { 756 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK); 757 PP_ASSERT_WITH_CODE(!ret, 758 "[SetupDefaultDpmTable] failed to get phyclk dpm levels!", 759 return ret); 760 } else 761 dpm_table->count = 0; 762 vega20_init_dpm_state(&(dpm_table->dpm_state)); 763 764 /* fclk */ 765 dpm_table = &(data->dpm_table.fclk_table); 766 if (data->smu_features[GNLD_DPM_FCLK].enabled) { 767 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_FCLK); 768 PP_ASSERT_WITH_CODE(!ret, 769 "[SetupDefaultDpmTable] failed to get fclk dpm levels!", 770 return ret); 771 } else { 772 dpm_table->count = 1; 773 dpm_table->dpm_levels[0].value = data->vbios_boot_state.fclock / 100; 774 } 775 vega20_init_dpm_state(&(dpm_table->dpm_state)); 776 777 /* save a copy of the default DPM table */ 778 memcpy(&(data->golden_dpm_table), &(data->dpm_table), 779 sizeof(struct vega20_dpm_table)); 780 781 return 0; 782 } 783 784 /** 785 * vega20_init_smc_table - Initializes the SMC table and uploads it 786 * 787 * @hwmgr: the address of the powerplay hardware manager. 788 * return: always 0 789 */ 790 static int vega20_init_smc_table(struct pp_hwmgr *hwmgr) 791 { 792 int result; 793 struct vega20_hwmgr *data = 794 (struct vega20_hwmgr *)(hwmgr->backend); 795 PPTable_t *pp_table = &(data->smc_state_table.pp_table); 796 struct pp_atomfwctrl_bios_boot_up_values boot_up_values; 797 struct phm_ppt_v3_information *pptable_information = 798 (struct phm_ppt_v3_information *)hwmgr->pptable; 799 800 result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values); 801 PP_ASSERT_WITH_CODE(!result, 802 "[InitSMCTable] Failed to get vbios bootup values!", 803 return result); 804 805 data->vbios_boot_state.vddc = boot_up_values.usVddc; 806 data->vbios_boot_state.vddci = boot_up_values.usVddci; 807 data->vbios_boot_state.mvddc = boot_up_values.usMvddc; 808 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk; 809 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk; 810 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk; 811 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk; 812 data->vbios_boot_state.eclock = boot_up_values.ulEClk; 813 data->vbios_boot_state.vclock = boot_up_values.ulVClk; 814 data->vbios_boot_state.dclock = boot_up_values.ulDClk; 815 data->vbios_boot_state.fclock = boot_up_values.ulFClk; 816 data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID; 817 818 smum_send_msg_to_smc_with_parameter(hwmgr, 819 PPSMC_MSG_SetMinDeepSleepDcefclk, 820 (uint32_t)(data->vbios_boot_state.dcef_clock / 100), 821 NULL); 822 823 memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t)); 824 825 result = smum_smc_table_manager(hwmgr, 826 (uint8_t *)pp_table, TABLE_PPTABLE, false); 827 PP_ASSERT_WITH_CODE(!result, 828 "[InitSMCTable] Failed to upload PPtable!", 829 return result); 830 831 return 0; 832 } 833 834 /* 835 * Override PCIe link speed and link width for DPM Level 1. PPTable entries 836 * reflect the ASIC capabilities and not the system capabilities. For e.g. 837 * Vega20 board in a PCI Gen3 system. In this case, when SMU's tries to switch 838 * to DPM1, it fails as system doesn't support Gen4. 839 */ 840 static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr) 841 { 842 struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); 843 struct vega20_hwmgr *data = 844 (struct vega20_hwmgr *)(hwmgr->backend); 845 uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg; 846 PPTable_t *pp_table = &(data->smc_state_table.pp_table); 847 int i; 848 int ret; 849 850 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) 851 pcie_gen = 3; 852 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) 853 pcie_gen = 2; 854 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) 855 pcie_gen = 1; 856 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1) 857 pcie_gen = 0; 858 859 if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) 860 pcie_width = 6; 861 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) 862 pcie_width = 5; 863 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) 864 pcie_width = 4; 865 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) 866 pcie_width = 3; 867 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) 868 pcie_width = 2; 869 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) 870 pcie_width = 1; 871 872 /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1 873 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4 874 * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 875 */ 876 for (i = 0; i < NUM_LINK_LEVELS; i++) { 877 pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen : 878 pp_table->PcieGenSpeed[i]; 879 pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ? pcie_width : 880 pp_table->PcieLaneCount[i]; 881 882 if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg != 883 pp_table->PcieLaneCount[i]) { 884 smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg; 885 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 886 PPSMC_MSG_OverridePcieParameters, smu_pcie_arg, 887 NULL); 888 PP_ASSERT_WITH_CODE(!ret, 889 "[OverridePcieParameters] Attempt to override pcie params failed!", 890 return ret); 891 } 892 893 /* update the pptable */ 894 pp_table->PcieGenSpeed[i] = pcie_gen_arg; 895 pp_table->PcieLaneCount[i] = pcie_width_arg; 896 } 897 898 /* override to the highest if it's disabled from ppfeaturmask */ 899 if (data->registry_data.pcie_dpm_key_disabled) { 900 for (i = 0; i < NUM_LINK_LEVELS; i++) { 901 smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width; 902 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 903 PPSMC_MSG_OverridePcieParameters, smu_pcie_arg, 904 NULL); 905 PP_ASSERT_WITH_CODE(!ret, 906 "[OverridePcieParameters] Attempt to override pcie params failed!", 907 return ret); 908 909 pp_table->PcieGenSpeed[i] = pcie_gen; 910 pp_table->PcieLaneCount[i] = pcie_width; 911 } 912 ret = vega20_enable_smc_features(hwmgr, 913 false, 914 data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap); 915 PP_ASSERT_WITH_CODE(!ret, 916 "Attempt to Disable DPM LINK Failed!", 917 return ret); 918 data->smu_features[GNLD_DPM_LINK].enabled = false; 919 data->smu_features[GNLD_DPM_LINK].supported = false; 920 } 921 922 return 0; 923 } 924 925 static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) 926 { 927 struct vega20_hwmgr *data = 928 (struct vega20_hwmgr *)(hwmgr->backend); 929 uint32_t allowed_features_low = 0, allowed_features_high = 0; 930 int i; 931 int ret = 0; 932 933 for (i = 0; i < GNLD_FEATURES_MAX; i++) 934 if (data->smu_features[i].allowed) 935 data->smu_features[i].smu_feature_id > 31 ? 936 (allowed_features_high |= 937 ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_HIGH_SHIFT) 938 & 0xFFFFFFFF)) : 939 (allowed_features_low |= 940 ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT) 941 & 0xFFFFFFFF)); 942 943 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 944 PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high, NULL); 945 PP_ASSERT_WITH_CODE(!ret, 946 "[SetAllowedFeaturesMask] Attempt to set allowed features mask(high) failed!", 947 return ret); 948 949 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 950 PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low, NULL); 951 PP_ASSERT_WITH_CODE(!ret, 952 "[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!", 953 return ret); 954 955 return 0; 956 } 957 958 static int vega20_run_btc(struct pp_hwmgr *hwmgr) 959 { 960 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc, NULL); 961 } 962 963 static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr) 964 { 965 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc, NULL); 966 } 967 968 static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr) 969 { 970 struct vega20_hwmgr *data = 971 (struct vega20_hwmgr *)(hwmgr->backend); 972 uint64_t features_enabled; 973 int i; 974 bool enabled; 975 int ret = 0; 976 977 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, 978 PPSMC_MSG_EnableAllSmuFeatures, 979 NULL)) == 0, 980 "[EnableAllSMUFeatures] Failed to enable all smu features!", 981 return ret); 982 983 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); 984 PP_ASSERT_WITH_CODE(!ret, 985 "[EnableAllSmuFeatures] Failed to get enabled smc features!", 986 return ret); 987 988 for (i = 0; i < GNLD_FEATURES_MAX; i++) { 989 enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? 990 true : false; 991 data->smu_features[i].enabled = enabled; 992 data->smu_features[i].supported = enabled; 993 994 #if 0 995 if (data->smu_features[i].allowed && !enabled) 996 pr_info("[EnableAllSMUFeatures] feature %d is expected enabled!", i); 997 else if (!data->smu_features[i].allowed && enabled) 998 pr_info("[EnableAllSMUFeatures] feature %d is expected disabled!", i); 999 #endif 1000 } 1001 1002 return 0; 1003 } 1004 1005 static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr) 1006 { 1007 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 1008 1009 if (data->smu_features[GNLD_DPM_UCLK].enabled) 1010 return smum_send_msg_to_smc_with_parameter(hwmgr, 1011 PPSMC_MSG_SetUclkFastSwitch, 1012 1, 1013 NULL); 1014 1015 return 0; 1016 } 1017 1018 static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr) 1019 { 1020 struct vega20_hwmgr *data = 1021 (struct vega20_hwmgr *)(hwmgr->backend); 1022 1023 return smum_send_msg_to_smc_with_parameter(hwmgr, 1024 PPSMC_MSG_SetFclkGfxClkRatio, 1025 data->registry_data.fclk_gfxclk_ratio, 1026 NULL); 1027 } 1028 1029 static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr) 1030 { 1031 struct vega20_hwmgr *data = 1032 (struct vega20_hwmgr *)(hwmgr->backend); 1033 int i, ret = 0; 1034 1035 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, 1036 PPSMC_MSG_DisableAllSmuFeatures, 1037 NULL)) == 0, 1038 "[DisableAllSMUFeatures] Failed to disable all smu features!", 1039 return ret); 1040 1041 for (i = 0; i < GNLD_FEATURES_MAX; i++) 1042 data->smu_features[i].enabled = 0; 1043 1044 return 0; 1045 } 1046 1047 static int vega20_od8_set_feature_capabilities( 1048 struct pp_hwmgr *hwmgr) 1049 { 1050 struct phm_ppt_v3_information *pptable_information = 1051 (struct phm_ppt_v3_information *)hwmgr->pptable; 1052 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 1053 PPTable_t *pp_table = &(data->smc_state_table.pp_table); 1054 struct vega20_od8_settings *od_settings = &(data->od8_settings); 1055 1056 od_settings->overdrive8_capabilities = 0; 1057 1058 if (data->smu_features[GNLD_DPM_GFXCLK].enabled) { 1059 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_LIMITS] && 1060 pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] > 0 && 1061 pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN] > 0 && 1062 (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] >= 1063 pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN])) 1064 od_settings->overdrive8_capabilities |= OD8_GFXCLK_LIMITS; 1065 1066 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_CURVE] && 1067 (pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] >= 1068 pp_table->MinVoltageGfx / VOLTAGE_SCALE) && 1069 (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] <= 1070 pp_table->MaxVoltageGfx / VOLTAGE_SCALE) && 1071 (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] >= 1072 pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1])) 1073 od_settings->overdrive8_capabilities |= OD8_GFXCLK_CURVE; 1074 } 1075 1076 if (data->smu_features[GNLD_DPM_UCLK].enabled) { 1077 pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] = 1078 data->dpm_table.mem_table.dpm_levels[data->dpm_table.mem_table.count - 2].value; 1079 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] && 1080 pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 && 1081 pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 && 1082 (pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] >= 1083 pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX])) 1084 od_settings->overdrive8_capabilities |= OD8_UCLK_MAX; 1085 } 1086 1087 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_POWER_LIMIT] && 1088 pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] > 0 && 1089 pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] <= 100 && 1090 pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] > 0 && 1091 pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] <= 100) 1092 od_settings->overdrive8_capabilities |= OD8_POWER_LIMIT; 1093 1094 if (data->smu_features[GNLD_FAN_CONTROL].enabled) { 1095 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ACOUSTIC_LIMIT] && 1096 pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 && 1097 pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 && 1098 (pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] >= 1099 pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT])) 1100 od_settings->overdrive8_capabilities |= OD8_ACOUSTIC_LIMIT_SCLK; 1101 1102 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_SPEED_MIN] && 1103 (pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED] >= 1104 (pp_table->FanPwmMin * pp_table->FanMaximumRpm / 100)) && 1105 pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] > 0 && 1106 (pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] >= 1107 pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED])) 1108 od_settings->overdrive8_capabilities |= OD8_FAN_SPEED_MIN; 1109 } 1110 1111 if (data->smu_features[GNLD_THERMAL].enabled) { 1112 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_FAN] && 1113 pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] > 0 && 1114 pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP] > 0 && 1115 (pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] >= 1116 pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP])) 1117 od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_FAN; 1118 1119 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_SYSTEM] && 1120 pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] > 0 && 1121 pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX] > 0 && 1122 (pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] >= 1123 pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX])) 1124 od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_SYSTEM; 1125 } 1126 1127 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_MEMORY_TIMING_TUNE]) 1128 od_settings->overdrive8_capabilities |= OD8_MEMORY_TIMING_TUNE; 1129 1130 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ZERO_RPM_CONTROL] && 1131 pp_table->FanZeroRpmEnable) 1132 od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL; 1133 1134 if (!od_settings->overdrive8_capabilities) 1135 hwmgr->od_enabled = false; 1136 1137 return 0; 1138 } 1139 1140 static int vega20_od8_set_feature_id( 1141 struct pp_hwmgr *hwmgr) 1142 { 1143 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 1144 struct vega20_od8_settings *od_settings = &(data->od8_settings); 1145 1146 if (od_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) { 1147 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id = 1148 OD8_GFXCLK_LIMITS; 1149 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id = 1150 OD8_GFXCLK_LIMITS; 1151 } else { 1152 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id = 1153 0; 1154 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id = 1155 0; 1156 } 1157 1158 if (od_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) { 1159 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id = 1160 OD8_GFXCLK_CURVE; 1161 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id = 1162 OD8_GFXCLK_CURVE; 1163 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id = 1164 OD8_GFXCLK_CURVE; 1165 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id = 1166 OD8_GFXCLK_CURVE; 1167 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id = 1168 OD8_GFXCLK_CURVE; 1169 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id = 1170 OD8_GFXCLK_CURVE; 1171 } else { 1172 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id = 1173 0; 1174 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id = 1175 0; 1176 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id = 1177 0; 1178 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id = 1179 0; 1180 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id = 1181 0; 1182 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id = 1183 0; 1184 } 1185 1186 if (od_settings->overdrive8_capabilities & OD8_UCLK_MAX) 1187 od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = OD8_UCLK_MAX; 1188 else 1189 od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = 0; 1190 1191 if (od_settings->overdrive8_capabilities & OD8_POWER_LIMIT) 1192 od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = OD8_POWER_LIMIT; 1193 else 1194 od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = 0; 1195 1196 if (od_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK) 1197 od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id = 1198 OD8_ACOUSTIC_LIMIT_SCLK; 1199 else 1200 od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id = 1201 0; 1202 1203 if (od_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN) 1204 od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id = 1205 OD8_FAN_SPEED_MIN; 1206 else 1207 od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id = 1208 0; 1209 1210 if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN) 1211 od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id = 1212 OD8_TEMPERATURE_FAN; 1213 else 1214 od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id = 1215 0; 1216 1217 if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM) 1218 od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id = 1219 OD8_TEMPERATURE_SYSTEM; 1220 else 1221 od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id = 1222 0; 1223 1224 return 0; 1225 } 1226 1227 static int vega20_od8_get_gfx_clock_base_voltage( 1228 struct pp_hwmgr *hwmgr, 1229 uint32_t *voltage, 1230 uint32_t freq) 1231 { 1232 int ret = 0; 1233 1234 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 1235 PPSMC_MSG_GetAVFSVoltageByDpm, 1236 ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq), 1237 voltage); 1238 PP_ASSERT_WITH_CODE(!ret, 1239 "[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!", 1240 return ret); 1241 1242 *voltage = *voltage / VOLTAGE_SCALE; 1243 1244 return 0; 1245 } 1246 1247 static int vega20_od8_initialize_default_settings( 1248 struct pp_hwmgr *hwmgr) 1249 { 1250 struct phm_ppt_v3_information *pptable_information = 1251 (struct phm_ppt_v3_information *)hwmgr->pptable; 1252 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 1253 struct vega20_od8_settings *od8_settings = &(data->od8_settings); 1254 OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table); 1255 int i, ret = 0; 1256 1257 /* Set Feature Capabilities */ 1258 vega20_od8_set_feature_capabilities(hwmgr); 1259 1260 /* Map FeatureID to individual settings */ 1261 vega20_od8_set_feature_id(hwmgr); 1262 1263 /* Set default values */ 1264 ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, true); 1265 PP_ASSERT_WITH_CODE(!ret, 1266 "Failed to export over drive table!", 1267 return ret); 1268 1269 if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) { 1270 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value = 1271 od_table->GfxclkFmin; 1272 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value = 1273 od_table->GfxclkFmax; 1274 } else { 1275 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value = 1276 0; 1277 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value = 1278 0; 1279 } 1280 1281 if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) { 1282 od_table->GfxclkFreq1 = od_table->GfxclkFmin; 1283 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value = 1284 od_table->GfxclkFreq1; 1285 1286 od_table->GfxclkFreq3 = od_table->GfxclkFmax; 1287 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value = 1288 od_table->GfxclkFreq3; 1289 1290 od_table->GfxclkFreq2 = (od_table->GfxclkFreq1 + od_table->GfxclkFreq3) / 2; 1291 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value = 1292 od_table->GfxclkFreq2; 1293 1294 PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr, 1295 &(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value), 1296 od_table->GfxclkFreq1), 1297 "[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!", 1298 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value = 0); 1299 od_table->GfxclkVolt1 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value 1300 * VOLTAGE_SCALE; 1301 1302 PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr, 1303 &(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value), 1304 od_table->GfxclkFreq2), 1305 "[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!", 1306 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value = 0); 1307 od_table->GfxclkVolt2 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value 1308 * VOLTAGE_SCALE; 1309 1310 PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr, 1311 &(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value), 1312 od_table->GfxclkFreq3), 1313 "[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!", 1314 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value = 0); 1315 od_table->GfxclkVolt3 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value 1316 * VOLTAGE_SCALE; 1317 } else { 1318 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value = 1319 0; 1320 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value = 1321 0; 1322 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value = 1323 0; 1324 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value = 1325 0; 1326 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value = 1327 0; 1328 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value = 1329 0; 1330 } 1331 1332 if (od8_settings->overdrive8_capabilities & OD8_UCLK_MAX) 1333 od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value = 1334 od_table->UclkFmax; 1335 else 1336 od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value = 1337 0; 1338 1339 if (od8_settings->overdrive8_capabilities & OD8_POWER_LIMIT) 1340 od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value = 1341 od_table->OverDrivePct; 1342 else 1343 od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value = 1344 0; 1345 1346 if (od8_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK) 1347 od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value = 1348 od_table->FanMaximumRpm; 1349 else 1350 od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value = 1351 0; 1352 1353 if (od8_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN) 1354 od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value = 1355 od_table->FanMinimumPwm * data->smc_state_table.pp_table.FanMaximumRpm / 100; 1356 else 1357 od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value = 1358 0; 1359 1360 if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN) 1361 od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value = 1362 od_table->FanTargetTemperature; 1363 else 1364 od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value = 1365 0; 1366 1367 if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM) 1368 od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value = 1369 od_table->MaxOpTemp; 1370 else 1371 od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value = 1372 0; 1373 1374 for (i = 0; i < OD8_SETTING_COUNT; i++) { 1375 if (od8_settings->od8_settings_array[i].feature_id) { 1376 od8_settings->od8_settings_array[i].min_value = 1377 pptable_information->od_settings_min[i]; 1378 od8_settings->od8_settings_array[i].max_value = 1379 pptable_information->od_settings_max[i]; 1380 od8_settings->od8_settings_array[i].current_value = 1381 od8_settings->od8_settings_array[i].default_value; 1382 } else { 1383 od8_settings->od8_settings_array[i].min_value = 1384 0; 1385 od8_settings->od8_settings_array[i].max_value = 1386 0; 1387 od8_settings->od8_settings_array[i].current_value = 1388 0; 1389 } 1390 } 1391 1392 ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, false); 1393 PP_ASSERT_WITH_CODE(!ret, 1394 "Failed to import over drive table!", 1395 return ret); 1396 1397 return 0; 1398 } 1399 1400 static int vega20_od8_set_settings( 1401 struct pp_hwmgr *hwmgr, 1402 uint32_t index, 1403 uint32_t value) 1404 { 1405 OverDriveTable_t od_table; 1406 int ret = 0; 1407 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 1408 struct vega20_od8_single_setting *od8_settings = 1409 data->od8_settings.od8_settings_array; 1410 1411 ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, true); 1412 PP_ASSERT_WITH_CODE(!ret, 1413 "Failed to export over drive table!", 1414 return ret); 1415 1416 switch (index) { 1417 case OD8_SETTING_GFXCLK_FMIN: 1418 od_table.GfxclkFmin = (uint16_t)value; 1419 break; 1420 case OD8_SETTING_GFXCLK_FMAX: 1421 if (value < od8_settings[OD8_SETTING_GFXCLK_FMAX].min_value || 1422 value > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value) 1423 return -EINVAL; 1424 1425 od_table.GfxclkFmax = (uint16_t)value; 1426 break; 1427 case OD8_SETTING_GFXCLK_FREQ1: 1428 od_table.GfxclkFreq1 = (uint16_t)value; 1429 break; 1430 case OD8_SETTING_GFXCLK_VOLTAGE1: 1431 od_table.GfxclkVolt1 = (uint16_t)value; 1432 break; 1433 case OD8_SETTING_GFXCLK_FREQ2: 1434 od_table.GfxclkFreq2 = (uint16_t)value; 1435 break; 1436 case OD8_SETTING_GFXCLK_VOLTAGE2: 1437 od_table.GfxclkVolt2 = (uint16_t)value; 1438 break; 1439 case OD8_SETTING_GFXCLK_FREQ3: 1440 od_table.GfxclkFreq3 = (uint16_t)value; 1441 break; 1442 case OD8_SETTING_GFXCLK_VOLTAGE3: 1443 od_table.GfxclkVolt3 = (uint16_t)value; 1444 break; 1445 case OD8_SETTING_UCLK_FMAX: 1446 if (value < od8_settings[OD8_SETTING_UCLK_FMAX].min_value || 1447 value > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) 1448 return -EINVAL; 1449 od_table.UclkFmax = (uint16_t)value; 1450 break; 1451 case OD8_SETTING_POWER_PERCENTAGE: 1452 od_table.OverDrivePct = (int16_t)value; 1453 break; 1454 case OD8_SETTING_FAN_ACOUSTIC_LIMIT: 1455 od_table.FanMaximumRpm = (uint16_t)value; 1456 break; 1457 case OD8_SETTING_FAN_MIN_SPEED: 1458 od_table.FanMinimumPwm = (uint16_t)value; 1459 break; 1460 case OD8_SETTING_FAN_TARGET_TEMP: 1461 od_table.FanTargetTemperature = (uint16_t)value; 1462 break; 1463 case OD8_SETTING_OPERATING_TEMP_MAX: 1464 od_table.MaxOpTemp = (uint16_t)value; 1465 break; 1466 } 1467 1468 ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, false); 1469 PP_ASSERT_WITH_CODE(!ret, 1470 "Failed to import over drive table!", 1471 return ret); 1472 1473 return 0; 1474 } 1475 1476 static int vega20_get_sclk_od( 1477 struct pp_hwmgr *hwmgr) 1478 { 1479 struct vega20_hwmgr *data = hwmgr->backend; 1480 struct vega20_single_dpm_table *sclk_table = 1481 &(data->dpm_table.gfx_table); 1482 struct vega20_single_dpm_table *golden_sclk_table = 1483 &(data->golden_dpm_table.gfx_table); 1484 int value = sclk_table->dpm_levels[sclk_table->count - 1].value; 1485 int golden_value = golden_sclk_table->dpm_levels 1486 [golden_sclk_table->count - 1].value; 1487 1488 /* od percentage */ 1489 value -= golden_value; 1490 value = DIV_ROUND_UP(value * 100, golden_value); 1491 1492 return value; 1493 } 1494 1495 static int vega20_set_sclk_od( 1496 struct pp_hwmgr *hwmgr, uint32_t value) 1497 { 1498 struct vega20_hwmgr *data = hwmgr->backend; 1499 struct vega20_single_dpm_table *golden_sclk_table = 1500 &(data->golden_dpm_table.gfx_table); 1501 uint32_t od_sclk; 1502 int ret = 0; 1503 1504 od_sclk = golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * value; 1505 od_sclk /= 100; 1506 od_sclk += golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; 1507 1508 ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_GFXCLK_FMAX, od_sclk); 1509 PP_ASSERT_WITH_CODE(!ret, 1510 "[SetSclkOD] failed to set od gfxclk!", 1511 return ret); 1512 1513 /* retrieve updated gfxclk table */ 1514 ret = vega20_setup_gfxclk_dpm_table(hwmgr); 1515 PP_ASSERT_WITH_CODE(!ret, 1516 "[SetSclkOD] failed to refresh gfxclk table!", 1517 return ret); 1518 1519 return 0; 1520 } 1521 1522 static int vega20_get_mclk_od( 1523 struct pp_hwmgr *hwmgr) 1524 { 1525 struct vega20_hwmgr *data = hwmgr->backend; 1526 struct vega20_single_dpm_table *mclk_table = 1527 &(data->dpm_table.mem_table); 1528 struct vega20_single_dpm_table *golden_mclk_table = 1529 &(data->golden_dpm_table.mem_table); 1530 int value = mclk_table->dpm_levels[mclk_table->count - 1].value; 1531 int golden_value = golden_mclk_table->dpm_levels 1532 [golden_mclk_table->count - 1].value; 1533 1534 /* od percentage */ 1535 value -= golden_value; 1536 value = DIV_ROUND_UP(value * 100, golden_value); 1537 1538 return value; 1539 } 1540 1541 static int vega20_set_mclk_od( 1542 struct pp_hwmgr *hwmgr, uint32_t value) 1543 { 1544 struct vega20_hwmgr *data = hwmgr->backend; 1545 struct vega20_single_dpm_table *golden_mclk_table = 1546 &(data->golden_dpm_table.mem_table); 1547 uint32_t od_mclk; 1548 int ret = 0; 1549 1550 od_mclk = golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * value; 1551 od_mclk /= 100; 1552 od_mclk += golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; 1553 1554 ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_UCLK_FMAX, od_mclk); 1555 PP_ASSERT_WITH_CODE(!ret, 1556 "[SetMclkOD] failed to set od memclk!", 1557 return ret); 1558 1559 /* retrieve updated memclk table */ 1560 ret = vega20_setup_memclk_dpm_table(hwmgr); 1561 PP_ASSERT_WITH_CODE(!ret, 1562 "[SetMclkOD] failed to refresh memclk table!", 1563 return ret); 1564 1565 return 0; 1566 } 1567 1568 static void vega20_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr) 1569 { 1570 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 1571 struct vega20_single_dpm_table *gfx_table = &(data->dpm_table.gfx_table); 1572 struct vega20_single_dpm_table *mem_table = &(data->dpm_table.mem_table); 1573 1574 if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL && 1575 mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) { 1576 hwmgr->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value; 1577 hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value; 1578 } else { 1579 hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value; 1580 hwmgr->pstate_mclk = mem_table->dpm_levels[0].value; 1581 } 1582 1583 hwmgr->pstate_sclk_peak = gfx_table->dpm_levels[gfx_table->count - 1].value; 1584 hwmgr->pstate_mclk_peak = mem_table->dpm_levels[mem_table->count - 1].value; 1585 } 1586 1587 static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr, 1588 PP_Clock *clock, PPCLK_e clock_select) 1589 { 1590 int ret = 0; 1591 1592 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 1593 PPSMC_MSG_GetDcModeMaxDpmFreq, 1594 (clock_select << 16), 1595 clock)) == 0, 1596 "[GetMaxSustainableClock] Failed to get max DC clock from SMC!", 1597 return ret); 1598 1599 /* if DC limit is zero, return AC limit */ 1600 if (*clock == 0) { 1601 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 1602 PPSMC_MSG_GetMaxDpmFreq, 1603 (clock_select << 16), 1604 clock)) == 0, 1605 "[GetMaxSustainableClock] failed to get max AC clock from SMC!", 1606 return ret); 1607 } 1608 1609 return 0; 1610 } 1611 1612 static int vega20_init_max_sustainable_clocks(struct pp_hwmgr *hwmgr) 1613 { 1614 struct vega20_hwmgr *data = 1615 (struct vega20_hwmgr *)(hwmgr->backend); 1616 struct vega20_max_sustainable_clocks *max_sustainable_clocks = 1617 &(data->max_sustainable_clocks); 1618 int ret = 0; 1619 1620 max_sustainable_clocks->uclock = data->vbios_boot_state.mem_clock / 100; 1621 max_sustainable_clocks->soc_clock = data->vbios_boot_state.soc_clock / 100; 1622 max_sustainable_clocks->dcef_clock = data->vbios_boot_state.dcef_clock / 100; 1623 max_sustainable_clocks->display_clock = 0xFFFFFFFF; 1624 max_sustainable_clocks->phy_clock = 0xFFFFFFFF; 1625 max_sustainable_clocks->pixel_clock = 0xFFFFFFFF; 1626 1627 if (data->smu_features[GNLD_DPM_UCLK].enabled) 1628 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1629 &(max_sustainable_clocks->uclock), 1630 PPCLK_UCLK)) == 0, 1631 "[InitMaxSustainableClocks] failed to get max UCLK from SMC!", 1632 return ret); 1633 1634 if (data->smu_features[GNLD_DPM_SOCCLK].enabled) 1635 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1636 &(max_sustainable_clocks->soc_clock), 1637 PPCLK_SOCCLK)) == 0, 1638 "[InitMaxSustainableClocks] failed to get max SOCCLK from SMC!", 1639 return ret); 1640 1641 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { 1642 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1643 &(max_sustainable_clocks->dcef_clock), 1644 PPCLK_DCEFCLK)) == 0, 1645 "[InitMaxSustainableClocks] failed to get max DCEFCLK from SMC!", 1646 return ret); 1647 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1648 &(max_sustainable_clocks->display_clock), 1649 PPCLK_DISPCLK)) == 0, 1650 "[InitMaxSustainableClocks] failed to get max DISPCLK from SMC!", 1651 return ret); 1652 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1653 &(max_sustainable_clocks->phy_clock), 1654 PPCLK_PHYCLK)) == 0, 1655 "[InitMaxSustainableClocks] failed to get max PHYCLK from SMC!", 1656 return ret); 1657 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1658 &(max_sustainable_clocks->pixel_clock), 1659 PPCLK_PIXCLK)) == 0, 1660 "[InitMaxSustainableClocks] failed to get max PIXCLK from SMC!", 1661 return ret); 1662 } 1663 1664 if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock) 1665 max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock; 1666 1667 return 0; 1668 } 1669 1670 static int vega20_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr) 1671 { 1672 int result; 1673 1674 result = smum_send_msg_to_smc(hwmgr, 1675 PPSMC_MSG_SetMGpuFanBoostLimitRpm, 1676 NULL); 1677 PP_ASSERT_WITH_CODE(!result, 1678 "[EnableMgpuFan] Failed to enable mgpu fan boost!", 1679 return result); 1680 1681 return 0; 1682 } 1683 1684 static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr) 1685 { 1686 struct vega20_hwmgr *data = 1687 (struct vega20_hwmgr *)(hwmgr->backend); 1688 1689 data->uvd_power_gated = true; 1690 data->vce_power_gated = true; 1691 } 1692 1693 static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) 1694 { 1695 int result = 0; 1696 1697 smum_send_msg_to_smc_with_parameter(hwmgr, 1698 PPSMC_MSG_NumOfDisplays, 0, NULL); 1699 1700 result = vega20_set_allowed_featuresmask(hwmgr); 1701 PP_ASSERT_WITH_CODE(!result, 1702 "[EnableDPMTasks] Failed to set allowed featuresmask!\n", 1703 return result); 1704 1705 result = vega20_init_smc_table(hwmgr); 1706 PP_ASSERT_WITH_CODE(!result, 1707 "[EnableDPMTasks] Failed to initialize SMC table!", 1708 return result); 1709 1710 result = vega20_run_btc(hwmgr); 1711 PP_ASSERT_WITH_CODE(!result, 1712 "[EnableDPMTasks] Failed to run btc!", 1713 return result); 1714 1715 result = vega20_run_btc_afll(hwmgr); 1716 PP_ASSERT_WITH_CODE(!result, 1717 "[EnableDPMTasks] Failed to run btc afll!", 1718 return result); 1719 1720 result = vega20_enable_all_smu_features(hwmgr); 1721 PP_ASSERT_WITH_CODE(!result, 1722 "[EnableDPMTasks] Failed to enable all smu features!", 1723 return result); 1724 1725 result = vega20_override_pcie_parameters(hwmgr); 1726 PP_ASSERT_WITH_CODE(!result, 1727 "[EnableDPMTasks] Failed to override pcie parameters!", 1728 return result); 1729 1730 result = vega20_notify_smc_display_change(hwmgr); 1731 PP_ASSERT_WITH_CODE(!result, 1732 "[EnableDPMTasks] Failed to notify smc display change!", 1733 return result); 1734 1735 result = vega20_send_clock_ratio(hwmgr); 1736 PP_ASSERT_WITH_CODE(!result, 1737 "[EnableDPMTasks] Failed to send clock ratio!", 1738 return result); 1739 1740 /* Initialize UVD/VCE powergating state */ 1741 vega20_init_powergate_state(hwmgr); 1742 1743 result = vega20_setup_default_dpm_tables(hwmgr); 1744 PP_ASSERT_WITH_CODE(!result, 1745 "[EnableDPMTasks] Failed to setup default DPM tables!", 1746 return result); 1747 1748 result = vega20_init_max_sustainable_clocks(hwmgr); 1749 PP_ASSERT_WITH_CODE(!result, 1750 "[EnableDPMTasks] Failed to get maximum sustainable clocks!", 1751 return result); 1752 1753 result = vega20_power_control_set_level(hwmgr); 1754 PP_ASSERT_WITH_CODE(!result, 1755 "[EnableDPMTasks] Failed to power control set level!", 1756 return result); 1757 1758 result = vega20_od8_initialize_default_settings(hwmgr); 1759 PP_ASSERT_WITH_CODE(!result, 1760 "[EnableDPMTasks] Failed to initialize odn settings!", 1761 return result); 1762 1763 vega20_populate_umdpstate_clocks(hwmgr); 1764 1765 result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit, 1766 POWER_SOURCE_AC << 16, &hwmgr->default_power_limit); 1767 PP_ASSERT_WITH_CODE(!result, 1768 "[GetPptLimit] get default PPT limit failed!", 1769 return result); 1770 hwmgr->power_limit = 1771 hwmgr->default_power_limit; 1772 1773 return 0; 1774 } 1775 1776 static uint32_t vega20_find_lowest_dpm_level( 1777 struct vega20_single_dpm_table *table) 1778 { 1779 uint32_t i; 1780 1781 for (i = 0; i < table->count; i++) { 1782 if (table->dpm_levels[i].enabled) 1783 break; 1784 } 1785 if (i >= table->count) { 1786 i = 0; 1787 table->dpm_levels[i].enabled = true; 1788 } 1789 1790 return i; 1791 } 1792 1793 static uint32_t vega20_find_highest_dpm_level( 1794 struct vega20_single_dpm_table *table) 1795 { 1796 int i = 0; 1797 1798 PP_ASSERT_WITH_CODE(table != NULL, 1799 "[FindHighestDPMLevel] DPM Table does not exist!", 1800 return 0); 1801 PP_ASSERT_WITH_CODE(table->count > 0, 1802 "[FindHighestDPMLevel] DPM Table has no entry!", 1803 return 0); 1804 PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER, 1805 "[FindHighestDPMLevel] DPM Table has too many entries!", 1806 return MAX_REGULAR_DPM_NUMBER - 1); 1807 1808 for (i = table->count - 1; i >= 0; i--) { 1809 if (table->dpm_levels[i].enabled) 1810 break; 1811 } 1812 if (i < 0) { 1813 i = 0; 1814 table->dpm_levels[i].enabled = true; 1815 } 1816 1817 return i; 1818 } 1819 1820 static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask) 1821 { 1822 struct vega20_hwmgr *data = 1823 (struct vega20_hwmgr *)(hwmgr->backend); 1824 uint32_t min_freq; 1825 int ret = 0; 1826 1827 if (data->smu_features[GNLD_DPM_GFXCLK].enabled && 1828 (feature_mask & FEATURE_DPM_GFXCLK_MASK)) { 1829 min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level; 1830 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1831 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1832 (PPCLK_GFXCLK << 16) | (min_freq & 0xffff), 1833 NULL)), 1834 "Failed to set soft min gfxclk !", 1835 return ret); 1836 } 1837 1838 if (data->smu_features[GNLD_DPM_UCLK].enabled && 1839 (feature_mask & FEATURE_DPM_UCLK_MASK)) { 1840 min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level; 1841 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1842 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1843 (PPCLK_UCLK << 16) | (min_freq & 0xffff), 1844 NULL)), 1845 "Failed to set soft min memclk !", 1846 return ret); 1847 } 1848 1849 if (data->smu_features[GNLD_DPM_UVD].enabled && 1850 (feature_mask & FEATURE_DPM_UVD_MASK)) { 1851 min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level; 1852 1853 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1854 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1855 (PPCLK_VCLK << 16) | (min_freq & 0xffff), 1856 NULL)), 1857 "Failed to set soft min vclk!", 1858 return ret); 1859 1860 min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level; 1861 1862 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1863 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1864 (PPCLK_DCLK << 16) | (min_freq & 0xffff), 1865 NULL)), 1866 "Failed to set soft min dclk!", 1867 return ret); 1868 } 1869 1870 if (data->smu_features[GNLD_DPM_VCE].enabled && 1871 (feature_mask & FEATURE_DPM_VCE_MASK)) { 1872 min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level; 1873 1874 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1875 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1876 (PPCLK_ECLK << 16) | (min_freq & 0xffff), 1877 NULL)), 1878 "Failed to set soft min eclk!", 1879 return ret); 1880 } 1881 1882 if (data->smu_features[GNLD_DPM_SOCCLK].enabled && 1883 (feature_mask & FEATURE_DPM_SOCCLK_MASK)) { 1884 min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level; 1885 1886 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1887 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1888 (PPCLK_SOCCLK << 16) | (min_freq & 0xffff), 1889 NULL)), 1890 "Failed to set soft min socclk!", 1891 return ret); 1892 } 1893 1894 if (data->smu_features[GNLD_DPM_FCLK].enabled && 1895 (feature_mask & FEATURE_DPM_FCLK_MASK)) { 1896 min_freq = data->dpm_table.fclk_table.dpm_state.soft_min_level; 1897 1898 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1899 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1900 (PPCLK_FCLK << 16) | (min_freq & 0xffff), 1901 NULL)), 1902 "Failed to set soft min fclk!", 1903 return ret); 1904 } 1905 1906 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled && 1907 (feature_mask & FEATURE_DPM_DCEFCLK_MASK)) { 1908 min_freq = data->dpm_table.dcef_table.dpm_state.hard_min_level; 1909 1910 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1911 hwmgr, PPSMC_MSG_SetHardMinByFreq, 1912 (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff), 1913 NULL)), 1914 "Failed to set hard min dcefclk!", 1915 return ret); 1916 } 1917 1918 return ret; 1919 } 1920 1921 static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask) 1922 { 1923 struct vega20_hwmgr *data = 1924 (struct vega20_hwmgr *)(hwmgr->backend); 1925 uint32_t max_freq; 1926 int ret = 0; 1927 1928 if (data->smu_features[GNLD_DPM_GFXCLK].enabled && 1929 (feature_mask & FEATURE_DPM_GFXCLK_MASK)) { 1930 max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level; 1931 1932 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1933 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1934 (PPCLK_GFXCLK << 16) | (max_freq & 0xffff), 1935 NULL)), 1936 "Failed to set soft max gfxclk!", 1937 return ret); 1938 } 1939 1940 if (data->smu_features[GNLD_DPM_UCLK].enabled && 1941 (feature_mask & FEATURE_DPM_UCLK_MASK)) { 1942 max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level; 1943 1944 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1945 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1946 (PPCLK_UCLK << 16) | (max_freq & 0xffff), 1947 NULL)), 1948 "Failed to set soft max memclk!", 1949 return ret); 1950 } 1951 1952 if (data->smu_features[GNLD_DPM_UVD].enabled && 1953 (feature_mask & FEATURE_DPM_UVD_MASK)) { 1954 max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level; 1955 1956 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1957 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1958 (PPCLK_VCLK << 16) | (max_freq & 0xffff), 1959 NULL)), 1960 "Failed to set soft max vclk!", 1961 return ret); 1962 1963 max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level; 1964 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1965 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1966 (PPCLK_DCLK << 16) | (max_freq & 0xffff), 1967 NULL)), 1968 "Failed to set soft max dclk!", 1969 return ret); 1970 } 1971 1972 if (data->smu_features[GNLD_DPM_VCE].enabled && 1973 (feature_mask & FEATURE_DPM_VCE_MASK)) { 1974 max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level; 1975 1976 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1977 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1978 (PPCLK_ECLK << 16) | (max_freq & 0xffff), 1979 NULL)), 1980 "Failed to set soft max eclk!", 1981 return ret); 1982 } 1983 1984 if (data->smu_features[GNLD_DPM_SOCCLK].enabled && 1985 (feature_mask & FEATURE_DPM_SOCCLK_MASK)) { 1986 max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level; 1987 1988 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1989 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1990 (PPCLK_SOCCLK << 16) | (max_freq & 0xffff), 1991 NULL)), 1992 "Failed to set soft max socclk!", 1993 return ret); 1994 } 1995 1996 if (data->smu_features[GNLD_DPM_FCLK].enabled && 1997 (feature_mask & FEATURE_DPM_FCLK_MASK)) { 1998 max_freq = data->dpm_table.fclk_table.dpm_state.soft_max_level; 1999 2000 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 2001 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 2002 (PPCLK_FCLK << 16) | (max_freq & 0xffff), 2003 NULL)), 2004 "Failed to set soft max fclk!", 2005 return ret); 2006 } 2007 2008 return ret; 2009 } 2010 2011 static int vega20_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) 2012 { 2013 struct vega20_hwmgr *data = 2014 (struct vega20_hwmgr *)(hwmgr->backend); 2015 int ret = 0; 2016 2017 if (data->smu_features[GNLD_DPM_VCE].supported) { 2018 if (data->smu_features[GNLD_DPM_VCE].enabled == enable) { 2019 if (enable) 2020 PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already enabled!\n"); 2021 else 2022 PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already disabled!\n"); 2023 } 2024 2025 ret = vega20_enable_smc_features(hwmgr, 2026 enable, 2027 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap); 2028 PP_ASSERT_WITH_CODE(!ret, 2029 "Attempt to Enable/Disable DPM VCE Failed!", 2030 return ret); 2031 data->smu_features[GNLD_DPM_VCE].enabled = enable; 2032 } 2033 2034 return 0; 2035 } 2036 2037 static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr, 2038 uint32_t *clock, 2039 PPCLK_e clock_select, 2040 bool max) 2041 { 2042 int ret; 2043 *clock = 0; 2044 2045 if (max) { 2046 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2047 PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16), 2048 clock)) == 0, 2049 "[GetClockRanges] Failed to get max clock from SMC!", 2050 return ret); 2051 } else { 2052 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2053 PPSMC_MSG_GetMinDpmFreq, 2054 (clock_select << 16), 2055 clock)) == 0, 2056 "[GetClockRanges] Failed to get min clock from SMC!", 2057 return ret); 2058 } 2059 2060 return 0; 2061 } 2062 2063 static uint32_t vega20_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) 2064 { 2065 struct vega20_hwmgr *data = 2066 (struct vega20_hwmgr *)(hwmgr->backend); 2067 uint32_t gfx_clk; 2068 int ret = 0; 2069 2070 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled, 2071 "[GetSclks]: gfxclk dpm not enabled!\n", 2072 return -EPERM); 2073 2074 if (low) { 2075 ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, false); 2076 PP_ASSERT_WITH_CODE(!ret, 2077 "[GetSclks]: fail to get min PPCLK_GFXCLK\n", 2078 return ret); 2079 } else { 2080 ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, true); 2081 PP_ASSERT_WITH_CODE(!ret, 2082 "[GetSclks]: fail to get max PPCLK_GFXCLK\n", 2083 return ret); 2084 } 2085 2086 return (gfx_clk * 100); 2087 } 2088 2089 static uint32_t vega20_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) 2090 { 2091 struct vega20_hwmgr *data = 2092 (struct vega20_hwmgr *)(hwmgr->backend); 2093 uint32_t mem_clk; 2094 int ret = 0; 2095 2096 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled, 2097 "[MemMclks]: memclk dpm not enabled!\n", 2098 return -EPERM); 2099 2100 if (low) { 2101 ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, false); 2102 PP_ASSERT_WITH_CODE(!ret, 2103 "[GetMclks]: fail to get min PPCLK_UCLK\n", 2104 return ret); 2105 } else { 2106 ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, true); 2107 PP_ASSERT_WITH_CODE(!ret, 2108 "[GetMclks]: fail to get max PPCLK_UCLK\n", 2109 return ret); 2110 } 2111 2112 return (mem_clk * 100); 2113 } 2114 2115 static int vega20_get_metrics_table(struct pp_hwmgr *hwmgr, 2116 SmuMetrics_t *metrics_table, 2117 bool bypass_cache) 2118 { 2119 struct vega20_hwmgr *data = 2120 (struct vega20_hwmgr *)(hwmgr->backend); 2121 int ret = 0; 2122 2123 if (bypass_cache || 2124 !data->metrics_time || 2125 time_after(jiffies, data->metrics_time + msecs_to_jiffies(1))) { 2126 ret = smum_smc_table_manager(hwmgr, 2127 (uint8_t *)(&data->metrics_table), 2128 TABLE_SMU_METRICS, 2129 true); 2130 if (ret) { 2131 pr_info("Failed to export SMU metrics table!\n"); 2132 return ret; 2133 } 2134 data->metrics_time = jiffies; 2135 } 2136 2137 if (metrics_table) 2138 memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t)); 2139 2140 return ret; 2141 } 2142 2143 static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr, int idx, 2144 uint32_t *query) 2145 { 2146 int ret = 0; 2147 SmuMetrics_t metrics_table; 2148 2149 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); 2150 if (ret) 2151 return ret; 2152 2153 /* For the 40.46 release, they changed the value name */ 2154 switch (idx) { 2155 case AMDGPU_PP_SENSOR_GPU_AVG_POWER: 2156 if (hwmgr->smu_version == 0x282e00) 2157 *query = metrics_table.AverageSocketPower << 8; 2158 else 2159 ret = -EOPNOTSUPP; 2160 break; 2161 case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: 2162 *query = metrics_table.CurrSocketPower << 8; 2163 break; 2164 } 2165 2166 return ret; 2167 } 2168 2169 static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr, 2170 PPCLK_e clk_id, uint32_t *clk_freq) 2171 { 2172 int ret = 0; 2173 2174 *clk_freq = 0; 2175 2176 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2177 PPSMC_MSG_GetDpmClockFreq, (clk_id << 16), 2178 clk_freq)) == 0, 2179 "[GetCurrentClkFreq] Attempt to get Current Frequency Failed!", 2180 return ret); 2181 2182 *clk_freq = *clk_freq * 100; 2183 2184 return 0; 2185 } 2186 2187 static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr, 2188 int idx, 2189 uint32_t *activity_percent) 2190 { 2191 int ret = 0; 2192 SmuMetrics_t metrics_table; 2193 2194 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); 2195 if (ret) 2196 return ret; 2197 2198 switch (idx) { 2199 case AMDGPU_PP_SENSOR_GPU_LOAD: 2200 *activity_percent = metrics_table.AverageGfxActivity; 2201 break; 2202 case AMDGPU_PP_SENSOR_MEM_LOAD: 2203 *activity_percent = metrics_table.AverageUclkActivity; 2204 break; 2205 default: 2206 pr_err("Invalid index for retrieving clock activity\n"); 2207 return -EINVAL; 2208 } 2209 2210 return ret; 2211 } 2212 2213 static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx, 2214 void *value, int *size) 2215 { 2216 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2217 struct amdgpu_device *adev = hwmgr->adev; 2218 SmuMetrics_t metrics_table; 2219 uint32_t val_vid; 2220 int ret = 0; 2221 2222 switch (idx) { 2223 case AMDGPU_PP_SENSOR_GFX_SCLK: 2224 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); 2225 if (ret) 2226 return ret; 2227 2228 *((uint32_t *)value) = metrics_table.AverageGfxclkFrequency * 100; 2229 *size = 4; 2230 break; 2231 case AMDGPU_PP_SENSOR_GFX_MCLK: 2232 ret = vega20_get_current_clk_freq(hwmgr, 2233 PPCLK_UCLK, 2234 (uint32_t *)value); 2235 if (!ret) 2236 *size = 4; 2237 break; 2238 case AMDGPU_PP_SENSOR_GPU_LOAD: 2239 case AMDGPU_PP_SENSOR_MEM_LOAD: 2240 ret = vega20_get_current_activity_percent(hwmgr, idx, (uint32_t *)value); 2241 if (!ret) 2242 *size = 4; 2243 break; 2244 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 2245 *((uint32_t *)value) = vega20_thermal_get_temperature(hwmgr); 2246 *size = 4; 2247 break; 2248 case AMDGPU_PP_SENSOR_EDGE_TEMP: 2249 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); 2250 if (ret) 2251 return ret; 2252 2253 *((uint32_t *)value) = metrics_table.TemperatureEdge * 2254 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 2255 *size = 4; 2256 break; 2257 case AMDGPU_PP_SENSOR_MEM_TEMP: 2258 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); 2259 if (ret) 2260 return ret; 2261 2262 *((uint32_t *)value) = metrics_table.TemperatureHBM * 2263 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 2264 *size = 4; 2265 break; 2266 case AMDGPU_PP_SENSOR_UVD_POWER: 2267 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1; 2268 *size = 4; 2269 break; 2270 case AMDGPU_PP_SENSOR_VCE_POWER: 2271 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1; 2272 *size = 4; 2273 break; 2274 case AMDGPU_PP_SENSOR_GPU_AVG_POWER: 2275 case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: 2276 *size = 16; 2277 ret = vega20_get_gpu_power(hwmgr, idx, (uint32_t *)value); 2278 break; 2279 case AMDGPU_PP_SENSOR_VDDGFX: 2280 val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) & 2281 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >> 2282 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT; 2283 *((uint32_t *)value) = 2284 (uint32_t)convert_to_vddc((uint8_t)val_vid); 2285 break; 2286 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: 2287 ret = vega20_get_enabled_smc_features(hwmgr, (uint64_t *)value); 2288 if (!ret) 2289 *size = 8; 2290 break; 2291 default: 2292 ret = -EOPNOTSUPP; 2293 break; 2294 } 2295 return ret; 2296 } 2297 2298 static int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr, 2299 struct pp_display_clock_request *clock_req) 2300 { 2301 int result = 0; 2302 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2303 enum amd_pp_clock_type clk_type = clock_req->clock_type; 2304 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; 2305 PPCLK_e clk_select = 0; 2306 uint32_t clk_request = 0; 2307 2308 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { 2309 switch (clk_type) { 2310 case amd_pp_dcef_clock: 2311 clk_select = PPCLK_DCEFCLK; 2312 break; 2313 case amd_pp_disp_clock: 2314 clk_select = PPCLK_DISPCLK; 2315 break; 2316 case amd_pp_pixel_clock: 2317 clk_select = PPCLK_PIXCLK; 2318 break; 2319 case amd_pp_phy_clock: 2320 clk_select = PPCLK_PHYCLK; 2321 break; 2322 default: 2323 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!"); 2324 result = -EINVAL; 2325 break; 2326 } 2327 2328 if (!result) { 2329 clk_request = (clk_select << 16) | clk_freq; 2330 result = smum_send_msg_to_smc_with_parameter(hwmgr, 2331 PPSMC_MSG_SetHardMinByFreq, 2332 clk_request, 2333 NULL); 2334 } 2335 } 2336 2337 return result; 2338 } 2339 2340 static int vega20_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, 2341 PHM_PerformanceLevelDesignation designation, uint32_t index, 2342 PHM_PerformanceLevel *level) 2343 { 2344 return 0; 2345 } 2346 2347 static int vega20_notify_smc_display_config_after_ps_adjustment( 2348 struct pp_hwmgr *hwmgr) 2349 { 2350 struct vega20_hwmgr *data = 2351 (struct vega20_hwmgr *)(hwmgr->backend); 2352 struct vega20_single_dpm_table *dpm_table = 2353 &data->dpm_table.mem_table; 2354 struct PP_Clocks min_clocks = {0}; 2355 struct pp_display_clock_request clock_req; 2356 int ret = 0; 2357 2358 min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk; 2359 min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk; 2360 min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; 2361 2362 if (data->smu_features[GNLD_DPM_DCEFCLK].supported) { 2363 clock_req.clock_type = amd_pp_dcef_clock; 2364 clock_req.clock_freq_in_khz = min_clocks.dcefClock * 10; 2365 if (!vega20_display_clock_voltage_request(hwmgr, &clock_req)) { 2366 if (data->smu_features[GNLD_DS_DCEFCLK].supported) 2367 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter( 2368 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, 2369 min_clocks.dcefClockInSR / 100, 2370 NULL)) == 0, 2371 "Attempt to set divider for DCEFCLK Failed!", 2372 return ret); 2373 } else { 2374 pr_info("Attempt to set Hard Min for DCEFCLK Failed!"); 2375 } 2376 } 2377 2378 if (data->smu_features[GNLD_DPM_UCLK].enabled) { 2379 dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100; 2380 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2381 PPSMC_MSG_SetHardMinByFreq, 2382 (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level, 2383 NULL)), 2384 "[SetHardMinFreq] Set hard min uclk failed!", 2385 return ret); 2386 } 2387 2388 return 0; 2389 } 2390 2391 static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr) 2392 { 2393 struct vega20_hwmgr *data = 2394 (struct vega20_hwmgr *)(hwmgr->backend); 2395 uint32_t soft_level; 2396 int ret = 0; 2397 2398 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table)); 2399 2400 data->dpm_table.gfx_table.dpm_state.soft_min_level = 2401 data->dpm_table.gfx_table.dpm_state.soft_max_level = 2402 data->dpm_table.gfx_table.dpm_levels[soft_level].value; 2403 2404 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table)); 2405 2406 data->dpm_table.mem_table.dpm_state.soft_min_level = 2407 data->dpm_table.mem_table.dpm_state.soft_max_level = 2408 data->dpm_table.mem_table.dpm_levels[soft_level].value; 2409 2410 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.soc_table)); 2411 2412 data->dpm_table.soc_table.dpm_state.soft_min_level = 2413 data->dpm_table.soc_table.dpm_state.soft_max_level = 2414 data->dpm_table.soc_table.dpm_levels[soft_level].value; 2415 2416 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2417 FEATURE_DPM_UCLK_MASK | 2418 FEATURE_DPM_SOCCLK_MASK); 2419 PP_ASSERT_WITH_CODE(!ret, 2420 "Failed to upload boot level to highest!", 2421 return ret); 2422 2423 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2424 FEATURE_DPM_UCLK_MASK | 2425 FEATURE_DPM_SOCCLK_MASK); 2426 PP_ASSERT_WITH_CODE(!ret, 2427 "Failed to upload dpm max level to highest!", 2428 return ret); 2429 2430 return 0; 2431 } 2432 2433 static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr) 2434 { 2435 struct vega20_hwmgr *data = 2436 (struct vega20_hwmgr *)(hwmgr->backend); 2437 uint32_t soft_level; 2438 int ret = 0; 2439 2440 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); 2441 2442 data->dpm_table.gfx_table.dpm_state.soft_min_level = 2443 data->dpm_table.gfx_table.dpm_state.soft_max_level = 2444 data->dpm_table.gfx_table.dpm_levels[soft_level].value; 2445 2446 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table)); 2447 2448 data->dpm_table.mem_table.dpm_state.soft_min_level = 2449 data->dpm_table.mem_table.dpm_state.soft_max_level = 2450 data->dpm_table.mem_table.dpm_levels[soft_level].value; 2451 2452 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table)); 2453 2454 data->dpm_table.soc_table.dpm_state.soft_min_level = 2455 data->dpm_table.soc_table.dpm_state.soft_max_level = 2456 data->dpm_table.soc_table.dpm_levels[soft_level].value; 2457 2458 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2459 FEATURE_DPM_UCLK_MASK | 2460 FEATURE_DPM_SOCCLK_MASK); 2461 PP_ASSERT_WITH_CODE(!ret, 2462 "Failed to upload boot level to highest!", 2463 return ret); 2464 2465 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2466 FEATURE_DPM_UCLK_MASK | 2467 FEATURE_DPM_SOCCLK_MASK); 2468 PP_ASSERT_WITH_CODE(!ret, 2469 "Failed to upload dpm max level to highest!", 2470 return ret); 2471 2472 return 0; 2473 2474 } 2475 2476 static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr) 2477 { 2478 struct vega20_hwmgr *data = 2479 (struct vega20_hwmgr *)(hwmgr->backend); 2480 uint32_t soft_min_level, soft_max_level; 2481 int ret = 0; 2482 2483 /* gfxclk soft min/max settings */ 2484 soft_min_level = 2485 vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); 2486 soft_max_level = 2487 vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table)); 2488 2489 data->dpm_table.gfx_table.dpm_state.soft_min_level = 2490 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; 2491 data->dpm_table.gfx_table.dpm_state.soft_max_level = 2492 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value; 2493 2494 /* uclk soft min/max settings */ 2495 soft_min_level = 2496 vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table)); 2497 soft_max_level = 2498 vega20_find_highest_dpm_level(&(data->dpm_table.mem_table)); 2499 2500 data->dpm_table.mem_table.dpm_state.soft_min_level = 2501 data->dpm_table.mem_table.dpm_levels[soft_min_level].value; 2502 data->dpm_table.mem_table.dpm_state.soft_max_level = 2503 data->dpm_table.mem_table.dpm_levels[soft_max_level].value; 2504 2505 /* socclk soft min/max settings */ 2506 soft_min_level = 2507 vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table)); 2508 soft_max_level = 2509 vega20_find_highest_dpm_level(&(data->dpm_table.soc_table)); 2510 2511 data->dpm_table.soc_table.dpm_state.soft_min_level = 2512 data->dpm_table.soc_table.dpm_levels[soft_min_level].value; 2513 data->dpm_table.soc_table.dpm_state.soft_max_level = 2514 data->dpm_table.soc_table.dpm_levels[soft_max_level].value; 2515 2516 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2517 FEATURE_DPM_UCLK_MASK | 2518 FEATURE_DPM_SOCCLK_MASK); 2519 PP_ASSERT_WITH_CODE(!ret, 2520 "Failed to upload DPM Bootup Levels!", 2521 return ret); 2522 2523 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2524 FEATURE_DPM_UCLK_MASK | 2525 FEATURE_DPM_SOCCLK_MASK); 2526 PP_ASSERT_WITH_CODE(!ret, 2527 "Failed to upload DPM Max Levels!", 2528 return ret); 2529 2530 return 0; 2531 } 2532 2533 static int vega20_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level, 2534 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask) 2535 { 2536 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2537 struct vega20_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table); 2538 struct vega20_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table); 2539 struct vega20_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table); 2540 2541 *sclk_mask = 0; 2542 *mclk_mask = 0; 2543 *soc_mask = 0; 2544 2545 if (gfx_dpm_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL && 2546 mem_dpm_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL && 2547 soc_dpm_table->count > VEGA20_UMD_PSTATE_SOCCLK_LEVEL) { 2548 *sclk_mask = VEGA20_UMD_PSTATE_GFXCLK_LEVEL; 2549 *mclk_mask = VEGA20_UMD_PSTATE_MCLK_LEVEL; 2550 *soc_mask = VEGA20_UMD_PSTATE_SOCCLK_LEVEL; 2551 } 2552 2553 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { 2554 *sclk_mask = 0; 2555 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { 2556 *mclk_mask = 0; 2557 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 2558 *sclk_mask = gfx_dpm_table->count - 1; 2559 *mclk_mask = mem_dpm_table->count - 1; 2560 *soc_mask = soc_dpm_table->count - 1; 2561 } 2562 2563 return 0; 2564 } 2565 2566 static int vega20_force_clock_level(struct pp_hwmgr *hwmgr, 2567 enum pp_clock_type type, uint32_t mask) 2568 { 2569 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2570 uint32_t soft_min_level, soft_max_level, hard_min_level; 2571 int ret = 0; 2572 2573 switch (type) { 2574 case PP_SCLK: 2575 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2576 soft_max_level = mask ? (fls(mask) - 1) : 0; 2577 2578 if (soft_max_level >= data->dpm_table.gfx_table.count) { 2579 pr_err("Clock level specified %d is over max allowed %d\n", 2580 soft_max_level, 2581 data->dpm_table.gfx_table.count - 1); 2582 return -EINVAL; 2583 } 2584 2585 data->dpm_table.gfx_table.dpm_state.soft_min_level = 2586 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; 2587 data->dpm_table.gfx_table.dpm_state.soft_max_level = 2588 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value; 2589 2590 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK); 2591 PP_ASSERT_WITH_CODE(!ret, 2592 "Failed to upload boot level to lowest!", 2593 return ret); 2594 2595 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK); 2596 PP_ASSERT_WITH_CODE(!ret, 2597 "Failed to upload dpm max level to highest!", 2598 return ret); 2599 break; 2600 2601 case PP_MCLK: 2602 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2603 soft_max_level = mask ? (fls(mask) - 1) : 0; 2604 2605 if (soft_max_level >= data->dpm_table.mem_table.count) { 2606 pr_err("Clock level specified %d is over max allowed %d\n", 2607 soft_max_level, 2608 data->dpm_table.mem_table.count - 1); 2609 return -EINVAL; 2610 } 2611 2612 data->dpm_table.mem_table.dpm_state.soft_min_level = 2613 data->dpm_table.mem_table.dpm_levels[soft_min_level].value; 2614 data->dpm_table.mem_table.dpm_state.soft_max_level = 2615 data->dpm_table.mem_table.dpm_levels[soft_max_level].value; 2616 2617 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_UCLK_MASK); 2618 PP_ASSERT_WITH_CODE(!ret, 2619 "Failed to upload boot level to lowest!", 2620 return ret); 2621 2622 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK); 2623 PP_ASSERT_WITH_CODE(!ret, 2624 "Failed to upload dpm max level to highest!", 2625 return ret); 2626 2627 break; 2628 2629 case PP_SOCCLK: 2630 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2631 soft_max_level = mask ? (fls(mask) - 1) : 0; 2632 2633 if (soft_max_level >= data->dpm_table.soc_table.count) { 2634 pr_err("Clock level specified %d is over max allowed %d\n", 2635 soft_max_level, 2636 data->dpm_table.soc_table.count - 1); 2637 return -EINVAL; 2638 } 2639 2640 data->dpm_table.soc_table.dpm_state.soft_min_level = 2641 data->dpm_table.soc_table.dpm_levels[soft_min_level].value; 2642 data->dpm_table.soc_table.dpm_state.soft_max_level = 2643 data->dpm_table.soc_table.dpm_levels[soft_max_level].value; 2644 2645 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_SOCCLK_MASK); 2646 PP_ASSERT_WITH_CODE(!ret, 2647 "Failed to upload boot level to lowest!", 2648 return ret); 2649 2650 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_SOCCLK_MASK); 2651 PP_ASSERT_WITH_CODE(!ret, 2652 "Failed to upload dpm max level to highest!", 2653 return ret); 2654 2655 break; 2656 2657 case PP_FCLK: 2658 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2659 soft_max_level = mask ? (fls(mask) - 1) : 0; 2660 2661 if (soft_max_level >= data->dpm_table.fclk_table.count) { 2662 pr_err("Clock level specified %d is over max allowed %d\n", 2663 soft_max_level, 2664 data->dpm_table.fclk_table.count - 1); 2665 return -EINVAL; 2666 } 2667 2668 data->dpm_table.fclk_table.dpm_state.soft_min_level = 2669 data->dpm_table.fclk_table.dpm_levels[soft_min_level].value; 2670 data->dpm_table.fclk_table.dpm_state.soft_max_level = 2671 data->dpm_table.fclk_table.dpm_levels[soft_max_level].value; 2672 2673 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_FCLK_MASK); 2674 PP_ASSERT_WITH_CODE(!ret, 2675 "Failed to upload boot level to lowest!", 2676 return ret); 2677 2678 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_FCLK_MASK); 2679 PP_ASSERT_WITH_CODE(!ret, 2680 "Failed to upload dpm max level to highest!", 2681 return ret); 2682 2683 break; 2684 2685 case PP_DCEFCLK: 2686 hard_min_level = mask ? (ffs(mask) - 1) : 0; 2687 2688 if (hard_min_level >= data->dpm_table.dcef_table.count) { 2689 pr_err("Clock level specified %d is over max allowed %d\n", 2690 hard_min_level, 2691 data->dpm_table.dcef_table.count - 1); 2692 return -EINVAL; 2693 } 2694 2695 data->dpm_table.dcef_table.dpm_state.hard_min_level = 2696 data->dpm_table.dcef_table.dpm_levels[hard_min_level].value; 2697 2698 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_DCEFCLK_MASK); 2699 PP_ASSERT_WITH_CODE(!ret, 2700 "Failed to upload boot level to lowest!", 2701 return ret); 2702 2703 //TODO: Setting DCEFCLK max dpm level is not supported 2704 2705 break; 2706 2707 case PP_PCIE: 2708 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2709 soft_max_level = mask ? (fls(mask) - 1) : 0; 2710 if (soft_min_level >= NUM_LINK_LEVELS || 2711 soft_max_level >= NUM_LINK_LEVELS) 2712 return -EINVAL; 2713 2714 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2715 PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level, 2716 NULL); 2717 PP_ASSERT_WITH_CODE(!ret, 2718 "Failed to set min link dpm level!", 2719 return ret); 2720 2721 break; 2722 2723 default: 2724 break; 2725 } 2726 2727 return 0; 2728 } 2729 2730 static int vega20_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, 2731 enum amd_dpm_forced_level level) 2732 { 2733 int ret = 0; 2734 uint32_t sclk_mask, mclk_mask, soc_mask; 2735 2736 switch (level) { 2737 case AMD_DPM_FORCED_LEVEL_HIGH: 2738 ret = vega20_force_dpm_highest(hwmgr); 2739 break; 2740 2741 case AMD_DPM_FORCED_LEVEL_LOW: 2742 ret = vega20_force_dpm_lowest(hwmgr); 2743 break; 2744 2745 case AMD_DPM_FORCED_LEVEL_AUTO: 2746 ret = vega20_unforce_dpm_levels(hwmgr); 2747 break; 2748 2749 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 2750 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 2751 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 2752 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 2753 ret = vega20_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask); 2754 if (ret) 2755 return ret; 2756 vega20_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask); 2757 vega20_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask); 2758 vega20_force_clock_level(hwmgr, PP_SOCCLK, 1 << soc_mask); 2759 break; 2760 2761 case AMD_DPM_FORCED_LEVEL_MANUAL: 2762 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 2763 default: 2764 break; 2765 } 2766 2767 return ret; 2768 } 2769 2770 static uint32_t vega20_get_fan_control_mode(struct pp_hwmgr *hwmgr) 2771 { 2772 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2773 2774 if (data->smu_features[GNLD_FAN_CONTROL].enabled == false) 2775 return AMD_FAN_CTRL_MANUAL; 2776 else 2777 return AMD_FAN_CTRL_AUTO; 2778 } 2779 2780 static void vega20_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) 2781 { 2782 switch (mode) { 2783 case AMD_FAN_CTRL_NONE: 2784 vega20_fan_ctrl_set_fan_speed_pwm(hwmgr, 255); 2785 break; 2786 case AMD_FAN_CTRL_MANUAL: 2787 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) 2788 vega20_fan_ctrl_stop_smc_fan_control(hwmgr); 2789 break; 2790 case AMD_FAN_CTRL_AUTO: 2791 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) 2792 vega20_fan_ctrl_start_smc_fan_control(hwmgr); 2793 break; 2794 default: 2795 break; 2796 } 2797 } 2798 2799 static int vega20_get_dal_power_level(struct pp_hwmgr *hwmgr, 2800 struct amd_pp_simple_clock_info *info) 2801 { 2802 #if 0 2803 struct phm_ppt_v2_information *table_info = 2804 (struct phm_ppt_v2_information *)hwmgr->pptable; 2805 struct phm_clock_and_voltage_limits *max_limits = 2806 &table_info->max_clock_voltage_on_ac; 2807 2808 info->engine_max_clock = max_limits->sclk; 2809 info->memory_max_clock = max_limits->mclk; 2810 #endif 2811 return 0; 2812 } 2813 2814 2815 static int vega20_get_sclks(struct pp_hwmgr *hwmgr, 2816 struct pp_clock_levels_with_latency *clocks) 2817 { 2818 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2819 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table); 2820 int i, count; 2821 2822 if (!data->smu_features[GNLD_DPM_GFXCLK].enabled) 2823 return -1; 2824 2825 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; 2826 clocks->num_levels = count; 2827 2828 for (i = 0; i < count; i++) { 2829 clocks->data[i].clocks_in_khz = 2830 dpm_table->dpm_levels[i].value * 1000; 2831 clocks->data[i].latency_in_us = 0; 2832 } 2833 2834 return 0; 2835 } 2836 2837 static uint32_t vega20_get_mem_latency(struct pp_hwmgr *hwmgr, 2838 uint32_t clock) 2839 { 2840 return 25; 2841 } 2842 2843 static int vega20_get_memclocks(struct pp_hwmgr *hwmgr, 2844 struct pp_clock_levels_with_latency *clocks) 2845 { 2846 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2847 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.mem_table); 2848 int i, count; 2849 2850 if (!data->smu_features[GNLD_DPM_UCLK].enabled) 2851 return -1; 2852 2853 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; 2854 clocks->num_levels = data->mclk_latency_table.count = count; 2855 2856 for (i = 0; i < count; i++) { 2857 clocks->data[i].clocks_in_khz = 2858 data->mclk_latency_table.entries[i].frequency = 2859 dpm_table->dpm_levels[i].value * 1000; 2860 clocks->data[i].latency_in_us = 2861 data->mclk_latency_table.entries[i].latency = 2862 vega20_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value); 2863 } 2864 2865 return 0; 2866 } 2867 2868 static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr, 2869 struct pp_clock_levels_with_latency *clocks) 2870 { 2871 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2872 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.dcef_table); 2873 int i, count; 2874 2875 if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled) 2876 return -1; 2877 2878 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; 2879 clocks->num_levels = count; 2880 2881 for (i = 0; i < count; i++) { 2882 clocks->data[i].clocks_in_khz = 2883 dpm_table->dpm_levels[i].value * 1000; 2884 clocks->data[i].latency_in_us = 0; 2885 } 2886 2887 return 0; 2888 } 2889 2890 static int vega20_get_socclocks(struct pp_hwmgr *hwmgr, 2891 struct pp_clock_levels_with_latency *clocks) 2892 { 2893 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2894 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.soc_table); 2895 int i, count; 2896 2897 if (!data->smu_features[GNLD_DPM_SOCCLK].enabled) 2898 return -1; 2899 2900 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; 2901 clocks->num_levels = count; 2902 2903 for (i = 0; i < count; i++) { 2904 clocks->data[i].clocks_in_khz = 2905 dpm_table->dpm_levels[i].value * 1000; 2906 clocks->data[i].latency_in_us = 0; 2907 } 2908 2909 return 0; 2910 2911 } 2912 2913 static int vega20_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, 2914 enum amd_pp_clock_type type, 2915 struct pp_clock_levels_with_latency *clocks) 2916 { 2917 int ret; 2918 2919 switch (type) { 2920 case amd_pp_sys_clock: 2921 ret = vega20_get_sclks(hwmgr, clocks); 2922 break; 2923 case amd_pp_mem_clock: 2924 ret = vega20_get_memclocks(hwmgr, clocks); 2925 break; 2926 case amd_pp_dcef_clock: 2927 ret = vega20_get_dcefclocks(hwmgr, clocks); 2928 break; 2929 case amd_pp_soc_clock: 2930 ret = vega20_get_socclocks(hwmgr, clocks); 2931 break; 2932 default: 2933 return -EINVAL; 2934 } 2935 2936 return ret; 2937 } 2938 2939 static int vega20_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, 2940 enum amd_pp_clock_type type, 2941 struct pp_clock_levels_with_voltage *clocks) 2942 { 2943 clocks->num_levels = 0; 2944 2945 return 0; 2946 } 2947 2948 static int vega20_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, 2949 void *clock_ranges) 2950 { 2951 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2952 Watermarks_t *table = &(data->smc_state_table.water_marks_table); 2953 struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges; 2954 2955 if (!data->registry_data.disable_water_mark && 2956 data->smu_features[GNLD_DPM_DCEFCLK].supported && 2957 data->smu_features[GNLD_DPM_SOCCLK].supported) { 2958 smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges); 2959 data->water_marks_bitmap |= WaterMarksExist; 2960 data->water_marks_bitmap &= ~WaterMarksLoaded; 2961 } 2962 2963 return 0; 2964 } 2965 2966 static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, 2967 enum PP_OD_DPM_TABLE_COMMAND type, 2968 long *input, uint32_t size) 2969 { 2970 struct vega20_hwmgr *data = 2971 (struct vega20_hwmgr *)(hwmgr->backend); 2972 struct vega20_od8_single_setting *od8_settings = 2973 data->od8_settings.od8_settings_array; 2974 OverDriveTable_t *od_table = 2975 &(data->smc_state_table.overdrive_table); 2976 int32_t input_clk, input_vol, i; 2977 uint32_t input_index; 2978 int od8_id; 2979 int ret; 2980 2981 PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage", 2982 return -EINVAL); 2983 2984 switch (type) { 2985 case PP_OD_EDIT_SCLK_VDDC_TABLE: 2986 if (!(od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && 2987 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id)) { 2988 pr_info("Sclk min/max frequency overdrive not supported\n"); 2989 return -EOPNOTSUPP; 2990 } 2991 2992 for (i = 0; i < size; i += 2) { 2993 if (i + 2 > size) { 2994 pr_info("invalid number of input parameters %d\n", 2995 size); 2996 return -EINVAL; 2997 } 2998 2999 input_index = input[i]; 3000 input_clk = input[i + 1]; 3001 3002 if (input_index != 0 && input_index != 1) { 3003 pr_info("Invalid index %d\n", input_index); 3004 pr_info("Support min/max sclk frequency setting only which index by 0/1\n"); 3005 return -EINVAL; 3006 } 3007 3008 if (input_clk < od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value || 3009 input_clk > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value) { 3010 pr_info("clock freq %d is not within allowed range [%d - %d]\n", 3011 input_clk, 3012 od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value, 3013 od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value); 3014 return -EINVAL; 3015 } 3016 3017 if ((input_index == 0 && od_table->GfxclkFmin != input_clk) || 3018 (input_index == 1 && od_table->GfxclkFmax != input_clk)) 3019 data->gfxclk_overdrive = true; 3020 3021 if (input_index == 0) 3022 od_table->GfxclkFmin = input_clk; 3023 else 3024 od_table->GfxclkFmax = input_clk; 3025 } 3026 3027 break; 3028 3029 case PP_OD_EDIT_MCLK_VDDC_TABLE: 3030 if (!od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { 3031 pr_info("Mclk max frequency overdrive not supported\n"); 3032 return -EOPNOTSUPP; 3033 } 3034 3035 for (i = 0; i < size; i += 2) { 3036 if (i + 2 > size) { 3037 pr_info("invalid number of input parameters %d\n", 3038 size); 3039 return -EINVAL; 3040 } 3041 3042 input_index = input[i]; 3043 input_clk = input[i + 1]; 3044 3045 if (input_index != 1) { 3046 pr_info("Invalid index %d\n", input_index); 3047 pr_info("Support max Mclk frequency setting only which index by 1\n"); 3048 return -EINVAL; 3049 } 3050 3051 if (input_clk < od8_settings[OD8_SETTING_UCLK_FMAX].min_value || 3052 input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) { 3053 pr_info("clock freq %d is not within allowed range [%d - %d]\n", 3054 input_clk, 3055 od8_settings[OD8_SETTING_UCLK_FMAX].min_value, 3056 od8_settings[OD8_SETTING_UCLK_FMAX].max_value); 3057 return -EINVAL; 3058 } 3059 3060 if (input_index == 1 && od_table->UclkFmax != input_clk) 3061 data->memclk_overdrive = true; 3062 3063 od_table->UclkFmax = input_clk; 3064 } 3065 3066 break; 3067 3068 case PP_OD_EDIT_VDDC_CURVE: 3069 if (!(od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id && 3070 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id && 3071 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id && 3072 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && 3073 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && 3074 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id)) { 3075 pr_info("Voltage curve calibrate not supported\n"); 3076 return -EOPNOTSUPP; 3077 } 3078 3079 for (i = 0; i < size; i += 3) { 3080 if (i + 3 > size) { 3081 pr_info("invalid number of input parameters %d\n", 3082 size); 3083 return -EINVAL; 3084 } 3085 3086 input_index = input[i]; 3087 input_clk = input[i + 1]; 3088 input_vol = input[i + 2]; 3089 3090 if (input_index > 2) { 3091 pr_info("Setting for point %d is not supported\n", 3092 input_index + 1); 3093 pr_info("Three supported points index by 0, 1, 2\n"); 3094 return -EINVAL; 3095 } 3096 3097 od8_id = OD8_SETTING_GFXCLK_FREQ1 + 2 * input_index; 3098 if (input_clk < od8_settings[od8_id].min_value || 3099 input_clk > od8_settings[od8_id].max_value) { 3100 pr_info("clock freq %d is not within allowed range [%d - %d]\n", 3101 input_clk, 3102 od8_settings[od8_id].min_value, 3103 od8_settings[od8_id].max_value); 3104 return -EINVAL; 3105 } 3106 3107 od8_id = OD8_SETTING_GFXCLK_VOLTAGE1 + 2 * input_index; 3108 if (input_vol < od8_settings[od8_id].min_value || 3109 input_vol > od8_settings[od8_id].max_value) { 3110 pr_info("clock voltage %d is not within allowed range [%d - %d]\n", 3111 input_vol, 3112 od8_settings[od8_id].min_value, 3113 od8_settings[od8_id].max_value); 3114 return -EINVAL; 3115 } 3116 3117 switch (input_index) { 3118 case 0: 3119 od_table->GfxclkFreq1 = input_clk; 3120 od_table->GfxclkVolt1 = input_vol * VOLTAGE_SCALE; 3121 break; 3122 case 1: 3123 od_table->GfxclkFreq2 = input_clk; 3124 od_table->GfxclkVolt2 = input_vol * VOLTAGE_SCALE; 3125 break; 3126 case 2: 3127 od_table->GfxclkFreq3 = input_clk; 3128 od_table->GfxclkVolt3 = input_vol * VOLTAGE_SCALE; 3129 break; 3130 } 3131 } 3132 break; 3133 3134 case PP_OD_RESTORE_DEFAULT_TABLE: 3135 data->gfxclk_overdrive = false; 3136 data->memclk_overdrive = false; 3137 3138 ret = smum_smc_table_manager(hwmgr, 3139 (uint8_t *)od_table, 3140 TABLE_OVERDRIVE, true); 3141 PP_ASSERT_WITH_CODE(!ret, 3142 "Failed to export overdrive table!", 3143 return ret); 3144 break; 3145 3146 case PP_OD_COMMIT_DPM_TABLE: 3147 ret = smum_smc_table_manager(hwmgr, 3148 (uint8_t *)od_table, 3149 TABLE_OVERDRIVE, false); 3150 PP_ASSERT_WITH_CODE(!ret, 3151 "Failed to import overdrive table!", 3152 return ret); 3153 3154 /* retrieve updated gfxclk table */ 3155 if (data->gfxclk_overdrive) { 3156 data->gfxclk_overdrive = false; 3157 3158 ret = vega20_setup_gfxclk_dpm_table(hwmgr); 3159 if (ret) 3160 return ret; 3161 } 3162 3163 /* retrieve updated memclk table */ 3164 if (data->memclk_overdrive) { 3165 data->memclk_overdrive = false; 3166 3167 ret = vega20_setup_memclk_dpm_table(hwmgr); 3168 if (ret) 3169 return ret; 3170 } 3171 break; 3172 3173 default: 3174 return -EINVAL; 3175 } 3176 3177 return 0; 3178 } 3179 3180 static int vega20_set_mp1_state(struct pp_hwmgr *hwmgr, 3181 enum pp_mp1_state mp1_state) 3182 { 3183 uint16_t msg; 3184 int ret; 3185 3186 switch (mp1_state) { 3187 case PP_MP1_STATE_SHUTDOWN: 3188 msg = PPSMC_MSG_PrepareMp1ForShutdown; 3189 break; 3190 case PP_MP1_STATE_UNLOAD: 3191 msg = PPSMC_MSG_PrepareMp1ForUnload; 3192 break; 3193 case PP_MP1_STATE_RESET: 3194 msg = PPSMC_MSG_PrepareMp1ForReset; 3195 break; 3196 case PP_MP1_STATE_NONE: 3197 default: 3198 return 0; 3199 } 3200 3201 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0, 3202 "[PrepareMp1] Failed!", 3203 return ret); 3204 3205 return 0; 3206 } 3207 3208 static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) 3209 { 3210 static const char *ppfeature_name[] = { 3211 "DPM_PREFETCHER", 3212 "GFXCLK_DPM", 3213 "UCLK_DPM", 3214 "SOCCLK_DPM", 3215 "UVD_DPM", 3216 "VCE_DPM", 3217 "ULV", 3218 "MP0CLK_DPM", 3219 "LINK_DPM", 3220 "DCEFCLK_DPM", 3221 "GFXCLK_DS", 3222 "SOCCLK_DS", 3223 "LCLK_DS", 3224 "PPT", 3225 "TDC", 3226 "THERMAL", 3227 "GFX_PER_CU_CG", 3228 "RM", 3229 "DCEFCLK_DS", 3230 "ACDC", 3231 "VR0HOT", 3232 "VR1HOT", 3233 "FW_CTF", 3234 "LED_DISPLAY", 3235 "FAN_CONTROL", 3236 "GFX_EDC", 3237 "GFXOFF", 3238 "CG", 3239 "FCLK_DPM", 3240 "FCLK_DS", 3241 "MP1CLK_DS", 3242 "MP0CLK_DS", 3243 "XGMI", 3244 "ECC"}; 3245 static const char *output_title[] = { 3246 "FEATURES", 3247 "BITMASK", 3248 "ENABLEMENT"}; 3249 uint64_t features_enabled; 3250 int i; 3251 int ret = 0; 3252 int size = 0; 3253 3254 phm_get_sysfs_buf(&buf, &size); 3255 3256 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); 3257 PP_ASSERT_WITH_CODE(!ret, 3258 "[EnableAllSmuFeatures] Failed to get enabled smc features!", 3259 return ret); 3260 3261 size += sysfs_emit_at(buf, size, "Current ppfeatures: 0x%016llx\n", features_enabled); 3262 size += sysfs_emit_at(buf, size, "%-19s %-22s %s\n", 3263 output_title[0], 3264 output_title[1], 3265 output_title[2]); 3266 for (i = 0; i < GNLD_FEATURES_MAX; i++) { 3267 size += sysfs_emit_at(buf, size, "%-19s 0x%016llx %6s\n", 3268 ppfeature_name[i], 3269 1ULL << i, 3270 (features_enabled & (1ULL << i)) ? "Y" : "N"); 3271 } 3272 3273 return size; 3274 } 3275 3276 static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks) 3277 { 3278 struct vega20_hwmgr *data = 3279 (struct vega20_hwmgr *)(hwmgr->backend); 3280 uint64_t features_enabled, features_to_enable, features_to_disable; 3281 int i, ret = 0; 3282 bool enabled; 3283 3284 if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX)) 3285 return -EINVAL; 3286 3287 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); 3288 if (ret) 3289 return ret; 3290 3291 features_to_disable = 3292 features_enabled & ~new_ppfeature_masks; 3293 features_to_enable = 3294 ~features_enabled & new_ppfeature_masks; 3295 3296 pr_debug("features_to_disable 0x%llx\n", features_to_disable); 3297 pr_debug("features_to_enable 0x%llx\n", features_to_enable); 3298 3299 if (features_to_disable) { 3300 ret = vega20_enable_smc_features(hwmgr, false, features_to_disable); 3301 if (ret) 3302 return ret; 3303 } 3304 3305 if (features_to_enable) { 3306 ret = vega20_enable_smc_features(hwmgr, true, features_to_enable); 3307 if (ret) 3308 return ret; 3309 } 3310 3311 /* Update the cached feature enablement state */ 3312 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); 3313 if (ret) 3314 return ret; 3315 3316 for (i = 0; i < GNLD_FEATURES_MAX; i++) { 3317 enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? 3318 true : false; 3319 data->smu_features[i].enabled = enabled; 3320 } 3321 3322 return 0; 3323 } 3324 3325 static int vega20_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr) 3326 { 3327 struct amdgpu_device *adev = hwmgr->adev; 3328 3329 return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & 3330 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) 3331 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; 3332 } 3333 3334 static int vega20_get_current_pcie_link_width(struct pp_hwmgr *hwmgr) 3335 { 3336 uint32_t width_level; 3337 3338 width_level = vega20_get_current_pcie_link_width_level(hwmgr); 3339 if (width_level > LINK_WIDTH_MAX) 3340 width_level = 0; 3341 3342 return link_width[width_level]; 3343 } 3344 3345 static int vega20_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr) 3346 { 3347 struct amdgpu_device *adev = hwmgr->adev; 3348 3349 return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & 3350 PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) 3351 >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; 3352 } 3353 3354 static int vega20_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr) 3355 { 3356 uint32_t speed_level; 3357 3358 speed_level = vega20_get_current_pcie_link_speed_level(hwmgr); 3359 if (speed_level > LINK_SPEED_MAX) 3360 speed_level = 0; 3361 3362 return link_speed[speed_level]; 3363 } 3364 3365 static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, 3366 enum pp_clock_type type, char *buf) 3367 { 3368 struct vega20_hwmgr *data = 3369 (struct vega20_hwmgr *)(hwmgr->backend); 3370 struct vega20_od8_single_setting *od8_settings = 3371 data->od8_settings.od8_settings_array; 3372 OverDriveTable_t *od_table = 3373 &(data->smc_state_table.overdrive_table); 3374 PPTable_t *pptable = &(data->smc_state_table.pp_table); 3375 struct pp_clock_levels_with_latency clocks; 3376 struct vega20_single_dpm_table *fclk_dpm_table = 3377 &(data->dpm_table.fclk_table); 3378 int i, now, size = 0; 3379 int ret = 0; 3380 uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width; 3381 3382 switch (type) { 3383 case PP_SCLK: 3384 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now); 3385 PP_ASSERT_WITH_CODE(!ret, 3386 "Attempt to get current gfx clk Failed!", 3387 return ret); 3388 3389 if (vega20_get_sclks(hwmgr, &clocks)) { 3390 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", 3391 now / 100); 3392 break; 3393 } 3394 3395 for (i = 0; i < clocks.num_levels; i++) 3396 size += sprintf(buf + size, "%d: %uMhz %s\n", 3397 i, clocks.data[i].clocks_in_khz / 1000, 3398 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); 3399 break; 3400 3401 case PP_MCLK: 3402 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_UCLK, &now); 3403 PP_ASSERT_WITH_CODE(!ret, 3404 "Attempt to get current mclk freq Failed!", 3405 return ret); 3406 3407 if (vega20_get_memclocks(hwmgr, &clocks)) { 3408 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", 3409 now / 100); 3410 break; 3411 } 3412 3413 for (i = 0; i < clocks.num_levels; i++) 3414 size += sprintf(buf + size, "%d: %uMhz %s\n", 3415 i, clocks.data[i].clocks_in_khz / 1000, 3416 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); 3417 break; 3418 3419 case PP_SOCCLK: 3420 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_SOCCLK, &now); 3421 PP_ASSERT_WITH_CODE(!ret, 3422 "Attempt to get current socclk freq Failed!", 3423 return ret); 3424 3425 if (vega20_get_socclocks(hwmgr, &clocks)) { 3426 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", 3427 now / 100); 3428 break; 3429 } 3430 3431 for (i = 0; i < clocks.num_levels; i++) 3432 size += sprintf(buf + size, "%d: %uMhz %s\n", 3433 i, clocks.data[i].clocks_in_khz / 1000, 3434 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); 3435 break; 3436 3437 case PP_FCLK: 3438 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_FCLK, &now); 3439 PP_ASSERT_WITH_CODE(!ret, 3440 "Attempt to get current fclk freq Failed!", 3441 return ret); 3442 3443 for (i = 0; i < fclk_dpm_table->count; i++) 3444 size += sprintf(buf + size, "%d: %uMhz %s\n", 3445 i, fclk_dpm_table->dpm_levels[i].value, 3446 fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : ""); 3447 break; 3448 3449 case PP_DCEFCLK: 3450 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_DCEFCLK, &now); 3451 PP_ASSERT_WITH_CODE(!ret, 3452 "Attempt to get current dcefclk freq Failed!", 3453 return ret); 3454 3455 if (vega20_get_dcefclocks(hwmgr, &clocks)) { 3456 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", 3457 now / 100); 3458 break; 3459 } 3460 3461 for (i = 0; i < clocks.num_levels; i++) 3462 size += sprintf(buf + size, "%d: %uMhz %s\n", 3463 i, clocks.data[i].clocks_in_khz / 1000, 3464 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); 3465 break; 3466 3467 case PP_PCIE: 3468 current_gen_speed = 3469 vega20_get_current_pcie_link_speed_level(hwmgr); 3470 current_lane_width = 3471 vega20_get_current_pcie_link_width_level(hwmgr); 3472 for (i = 0; i < NUM_LINK_LEVELS; i++) { 3473 gen_speed = pptable->PcieGenSpeed[i]; 3474 lane_width = pptable->PcieLaneCount[i]; 3475 3476 size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, 3477 (gen_speed == 0) ? "2.5GT/s," : 3478 (gen_speed == 1) ? "5.0GT/s," : 3479 (gen_speed == 2) ? "8.0GT/s," : 3480 (gen_speed == 3) ? "16.0GT/s," : "", 3481 (lane_width == 1) ? "x1" : 3482 (lane_width == 2) ? "x2" : 3483 (lane_width == 3) ? "x4" : 3484 (lane_width == 4) ? "x8" : 3485 (lane_width == 5) ? "x12" : 3486 (lane_width == 6) ? "x16" : "", 3487 pptable->LclkFreq[i], 3488 (current_gen_speed == gen_speed) && 3489 (current_lane_width == lane_width) ? 3490 "*" : ""); 3491 } 3492 break; 3493 3494 case OD_SCLK: 3495 if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && 3496 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { 3497 size += sprintf(buf + size, "%s:\n", "OD_SCLK"); 3498 size += sprintf(buf + size, "0: %10uMhz\n", 3499 od_table->GfxclkFmin); 3500 size += sprintf(buf + size, "1: %10uMhz\n", 3501 od_table->GfxclkFmax); 3502 } 3503 break; 3504 3505 case OD_MCLK: 3506 if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { 3507 size += sprintf(buf + size, "%s:\n", "OD_MCLK"); 3508 size += sprintf(buf + size, "1: %10uMhz\n", 3509 od_table->UclkFmax); 3510 } 3511 3512 break; 3513 3514 case OD_VDDC_CURVE: 3515 if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id && 3516 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id && 3517 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id && 3518 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && 3519 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && 3520 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { 3521 size += sprintf(buf + size, "%s:\n", "OD_VDDC_CURVE"); 3522 size += sprintf(buf + size, "0: %10uMhz %10dmV\n", 3523 od_table->GfxclkFreq1, 3524 od_table->GfxclkVolt1 / VOLTAGE_SCALE); 3525 size += sprintf(buf + size, "1: %10uMhz %10dmV\n", 3526 od_table->GfxclkFreq2, 3527 od_table->GfxclkVolt2 / VOLTAGE_SCALE); 3528 size += sprintf(buf + size, "2: %10uMhz %10dmV\n", 3529 od_table->GfxclkFreq3, 3530 od_table->GfxclkVolt3 / VOLTAGE_SCALE); 3531 } 3532 3533 break; 3534 3535 case OD_RANGE: 3536 size += sprintf(buf + size, "%s:\n", "OD_RANGE"); 3537 3538 if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && 3539 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { 3540 size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n", 3541 od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value, 3542 od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value); 3543 } 3544 3545 if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { 3546 size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n", 3547 od8_settings[OD8_SETTING_UCLK_FMAX].min_value, 3548 od8_settings[OD8_SETTING_UCLK_FMAX].max_value); 3549 } 3550 3551 if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id && 3552 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id && 3553 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id && 3554 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && 3555 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && 3556 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { 3557 size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n", 3558 od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value, 3559 od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value); 3560 size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n", 3561 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value, 3562 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value); 3563 size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n", 3564 od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value, 3565 od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value); 3566 size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n", 3567 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value, 3568 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value); 3569 size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n", 3570 od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value, 3571 od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value); 3572 size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n", 3573 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value, 3574 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value); 3575 } 3576 3577 break; 3578 default: 3579 break; 3580 } 3581 return size; 3582 } 3583 3584 static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr, 3585 struct vega20_single_dpm_table *dpm_table) 3586 { 3587 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3588 int ret = 0; 3589 3590 if (data->smu_features[GNLD_DPM_UCLK].enabled) { 3591 PP_ASSERT_WITH_CODE(dpm_table->count > 0, 3592 "[SetUclkToHightestDpmLevel] Dpm table has no entry!", 3593 return -EINVAL); 3594 PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS, 3595 "[SetUclkToHightestDpmLevel] Dpm table has too many entries!", 3596 return -EINVAL); 3597 3598 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3599 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, 3600 PPSMC_MSG_SetHardMinByFreq, 3601 (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level, 3602 NULL)), 3603 "[SetUclkToHightestDpmLevel] Set hard min uclk failed!", 3604 return ret); 3605 } 3606 3607 return ret; 3608 } 3609 3610 static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr) 3611 { 3612 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3613 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.fclk_table); 3614 int ret = 0; 3615 3616 if (data->smu_features[GNLD_DPM_FCLK].enabled) { 3617 PP_ASSERT_WITH_CODE(dpm_table->count > 0, 3618 "[SetFclkToHightestDpmLevel] Dpm table has no entry!", 3619 return -EINVAL); 3620 PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_FCLK_DPM_LEVELS, 3621 "[SetFclkToHightestDpmLevel] Dpm table has too many entries!", 3622 return -EINVAL); 3623 3624 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3625 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, 3626 PPSMC_MSG_SetSoftMinByFreq, 3627 (PPCLK_FCLK << 16) | dpm_table->dpm_state.soft_min_level, 3628 NULL)), 3629 "[SetFclkToHightestDpmLevel] Set soft min fclk failed!", 3630 return ret); 3631 } 3632 3633 return ret; 3634 } 3635 3636 static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr) 3637 { 3638 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3639 int ret = 0; 3640 3641 smum_send_msg_to_smc_with_parameter(hwmgr, 3642 PPSMC_MSG_NumOfDisplays, 0, NULL); 3643 3644 ret = vega20_set_uclk_to_highest_dpm_level(hwmgr, 3645 &data->dpm_table.mem_table); 3646 if (ret) 3647 return ret; 3648 3649 return vega20_set_fclk_to_highest_dpm_level(hwmgr); 3650 } 3651 3652 static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr) 3653 { 3654 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3655 int result = 0; 3656 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table); 3657 3658 if ((data->water_marks_bitmap & WaterMarksExist) && 3659 !(data->water_marks_bitmap & WaterMarksLoaded)) { 3660 result = smum_smc_table_manager(hwmgr, 3661 (uint8_t *)wm_table, TABLE_WATERMARKS, false); 3662 PP_ASSERT_WITH_CODE(!result, 3663 "Failed to update WMTABLE!", 3664 return result); 3665 data->water_marks_bitmap |= WaterMarksLoaded; 3666 } 3667 3668 if ((data->water_marks_bitmap & WaterMarksExist) && 3669 data->smu_features[GNLD_DPM_DCEFCLK].supported && 3670 data->smu_features[GNLD_DPM_SOCCLK].supported) { 3671 result = smum_send_msg_to_smc_with_parameter(hwmgr, 3672 PPSMC_MSG_NumOfDisplays, 3673 hwmgr->display_config->num_display, 3674 NULL); 3675 } 3676 3677 return result; 3678 } 3679 3680 static int vega20_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) 3681 { 3682 struct vega20_hwmgr *data = 3683 (struct vega20_hwmgr *)(hwmgr->backend); 3684 int ret = 0; 3685 3686 if (data->smu_features[GNLD_DPM_UVD].supported) { 3687 if (data->smu_features[GNLD_DPM_UVD].enabled == enable) { 3688 if (enable) 3689 PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already enabled!\n"); 3690 else 3691 PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already disabled!\n"); 3692 } 3693 3694 ret = vega20_enable_smc_features(hwmgr, 3695 enable, 3696 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap); 3697 PP_ASSERT_WITH_CODE(!ret, 3698 "[EnableDisableUVDDPM] Attempt to Enable/Disable DPM UVD Failed!", 3699 return ret); 3700 data->smu_features[GNLD_DPM_UVD].enabled = enable; 3701 } 3702 3703 return 0; 3704 } 3705 3706 static void vega20_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate) 3707 { 3708 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3709 3710 if (data->vce_power_gated == bgate) 3711 return ; 3712 3713 data->vce_power_gated = bgate; 3714 if (bgate) { 3715 vega20_enable_disable_vce_dpm(hwmgr, !bgate); 3716 amdgpu_device_ip_set_powergating_state(hwmgr->adev, 3717 AMD_IP_BLOCK_TYPE_VCE, 3718 AMD_PG_STATE_GATE); 3719 } else { 3720 amdgpu_device_ip_set_powergating_state(hwmgr->adev, 3721 AMD_IP_BLOCK_TYPE_VCE, 3722 AMD_PG_STATE_UNGATE); 3723 vega20_enable_disable_vce_dpm(hwmgr, !bgate); 3724 } 3725 3726 } 3727 3728 static void vega20_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate) 3729 { 3730 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3731 3732 if (data->uvd_power_gated == bgate) 3733 return ; 3734 3735 data->uvd_power_gated = bgate; 3736 vega20_enable_disable_uvd_dpm(hwmgr, !bgate); 3737 } 3738 3739 static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) 3740 { 3741 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3742 struct vega20_single_dpm_table *dpm_table; 3743 bool vblank_too_short = false; 3744 bool disable_mclk_switching; 3745 bool disable_fclk_switching; 3746 uint32_t i, latency; 3747 3748 disable_mclk_switching = ((1 < hwmgr->display_config->num_display) && 3749 !hwmgr->display_config->multi_monitor_in_sync) || 3750 vblank_too_short; 3751 latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency; 3752 3753 /* gfxclk */ 3754 dpm_table = &(data->dpm_table.gfx_table); 3755 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3756 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3757 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3758 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3759 3760 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3761 if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) { 3762 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value; 3763 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value; 3764 } 3765 3766 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { 3767 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3768 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value; 3769 } 3770 3771 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 3772 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3773 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3774 } 3775 } 3776 3777 /* memclk */ 3778 dpm_table = &(data->dpm_table.mem_table); 3779 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3780 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3781 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3782 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3783 3784 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3785 if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) { 3786 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value; 3787 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value; 3788 } 3789 3790 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { 3791 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3792 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value; 3793 } 3794 3795 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 3796 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3797 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3798 } 3799 } 3800 3801 /* honour DAL's UCLK Hardmin */ 3802 if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100)) 3803 dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100; 3804 3805 /* Hardmin is dependent on displayconfig */ 3806 if (disable_mclk_switching) { 3807 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3808 for (i = 0; i < data->mclk_latency_table.count - 1; i++) { 3809 if (data->mclk_latency_table.entries[i].latency <= latency) { 3810 if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) { 3811 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value; 3812 break; 3813 } 3814 } 3815 } 3816 } 3817 3818 if (hwmgr->display_config->nb_pstate_switch_disable) 3819 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3820 3821 if ((disable_mclk_switching && 3822 (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) || 3823 hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value) 3824 disable_fclk_switching = true; 3825 else 3826 disable_fclk_switching = false; 3827 3828 /* fclk */ 3829 dpm_table = &(data->dpm_table.fclk_table); 3830 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3831 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3832 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3833 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3834 if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching) 3835 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3836 3837 /* vclk */ 3838 dpm_table = &(data->dpm_table.vclk_table); 3839 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3840 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3841 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3842 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3843 3844 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3845 if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) { 3846 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value; 3847 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value; 3848 } 3849 3850 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 3851 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3852 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3853 } 3854 } 3855 3856 /* dclk */ 3857 dpm_table = &(data->dpm_table.dclk_table); 3858 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3859 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3860 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3861 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3862 3863 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3864 if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) { 3865 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value; 3866 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value; 3867 } 3868 3869 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 3870 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3871 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3872 } 3873 } 3874 3875 /* socclk */ 3876 dpm_table = &(data->dpm_table.soc_table); 3877 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3878 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3879 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3880 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3881 3882 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3883 if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) { 3884 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value; 3885 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value; 3886 } 3887 3888 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 3889 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3890 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3891 } 3892 } 3893 3894 /* eclk */ 3895 dpm_table = &(data->dpm_table.eclk_table); 3896 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3897 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3898 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3899 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3900 3901 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3902 if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) { 3903 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value; 3904 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value; 3905 } 3906 3907 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 3908 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3909 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3910 } 3911 } 3912 3913 return 0; 3914 } 3915 3916 static bool 3917 vega20_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) 3918 { 3919 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3920 bool is_update_required = false; 3921 3922 if (data->display_timing.num_existing_displays != 3923 hwmgr->display_config->num_display) 3924 is_update_required = true; 3925 3926 if (data->registry_data.gfx_clk_deep_sleep_support && 3927 (data->display_timing.min_clock_in_sr != 3928 hwmgr->display_config->min_core_set_clock_in_sr)) 3929 is_update_required = true; 3930 3931 return is_update_required; 3932 } 3933 3934 static int vega20_disable_dpm_tasks(struct pp_hwmgr *hwmgr) 3935 { 3936 int ret = 0; 3937 3938 ret = vega20_disable_all_smu_features(hwmgr); 3939 PP_ASSERT_WITH_CODE(!ret, 3940 "[DisableDpmTasks] Failed to disable all smu features!", 3941 return ret); 3942 3943 return 0; 3944 } 3945 3946 static int vega20_power_off_asic(struct pp_hwmgr *hwmgr) 3947 { 3948 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3949 int result; 3950 3951 result = vega20_disable_dpm_tasks(hwmgr); 3952 PP_ASSERT_WITH_CODE((0 == result), 3953 "[PowerOffAsic] Failed to disable DPM!", 3954 ); 3955 data->water_marks_bitmap &= ~(WaterMarksLoaded); 3956 3957 return result; 3958 } 3959 3960 static int conv_power_profile_to_pplib_workload(int power_profile) 3961 { 3962 int pplib_workload = 0; 3963 3964 switch (power_profile) { 3965 case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT: 3966 pplib_workload = WORKLOAD_DEFAULT_BIT; 3967 break; 3968 case PP_SMC_POWER_PROFILE_FULLSCREEN3D: 3969 pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT; 3970 break; 3971 case PP_SMC_POWER_PROFILE_POWERSAVING: 3972 pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT; 3973 break; 3974 case PP_SMC_POWER_PROFILE_VIDEO: 3975 pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT; 3976 break; 3977 case PP_SMC_POWER_PROFILE_VR: 3978 pplib_workload = WORKLOAD_PPLIB_VR_BIT; 3979 break; 3980 case PP_SMC_POWER_PROFILE_COMPUTE: 3981 pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT; 3982 break; 3983 case PP_SMC_POWER_PROFILE_CUSTOM: 3984 pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT; 3985 break; 3986 } 3987 3988 return pplib_workload; 3989 } 3990 3991 static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) 3992 { 3993 DpmActivityMonitorCoeffInt_t activity_monitor; 3994 uint32_t i, size = 0; 3995 uint16_t workload_type = 0; 3996 static const char *title[] = { 3997 "PROFILE_INDEX(NAME)", 3998 "CLOCK_TYPE(NAME)", 3999 "FPS", 4000 "UseRlcBusy", 4001 "MinActiveFreqType", 4002 "MinActiveFreq", 4003 "BoosterFreqType", 4004 "BoosterFreq", 4005 "PD_Data_limit_c", 4006 "PD_Data_error_coeff", 4007 "PD_Data_error_rate_coeff"}; 4008 int result = 0; 4009 4010 if (!buf) 4011 return -EINVAL; 4012 4013 phm_get_sysfs_buf(&buf, &size); 4014 4015 size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n", 4016 title[0], title[1], title[2], title[3], title[4], title[5], 4017 title[6], title[7], title[8], title[9], title[10]); 4018 4019 for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { 4020 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 4021 workload_type = conv_power_profile_to_pplib_workload(i); 4022 result = vega20_get_activity_monitor_coeff(hwmgr, 4023 (uint8_t *)(&activity_monitor), workload_type); 4024 PP_ASSERT_WITH_CODE(!result, 4025 "[GetPowerProfile] Failed to get activity monitor!", 4026 return result); 4027 4028 size += sysfs_emit_at(buf, size, "%2d %14s%s:\n", 4029 i, amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " "); 4030 4031 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", 4032 " ", 4033 0, 4034 "GFXCLK", 4035 activity_monitor.Gfx_FPS, 4036 activity_monitor.Gfx_UseRlcBusy, 4037 activity_monitor.Gfx_MinActiveFreqType, 4038 activity_monitor.Gfx_MinActiveFreq, 4039 activity_monitor.Gfx_BoosterFreqType, 4040 activity_monitor.Gfx_BoosterFreq, 4041 activity_monitor.Gfx_PD_Data_limit_c, 4042 activity_monitor.Gfx_PD_Data_error_coeff, 4043 activity_monitor.Gfx_PD_Data_error_rate_coeff); 4044 4045 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", 4046 " ", 4047 1, 4048 "SOCCLK", 4049 activity_monitor.Soc_FPS, 4050 activity_monitor.Soc_UseRlcBusy, 4051 activity_monitor.Soc_MinActiveFreqType, 4052 activity_monitor.Soc_MinActiveFreq, 4053 activity_monitor.Soc_BoosterFreqType, 4054 activity_monitor.Soc_BoosterFreq, 4055 activity_monitor.Soc_PD_Data_limit_c, 4056 activity_monitor.Soc_PD_Data_error_coeff, 4057 activity_monitor.Soc_PD_Data_error_rate_coeff); 4058 4059 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", 4060 " ", 4061 2, 4062 "UCLK", 4063 activity_monitor.Mem_FPS, 4064 activity_monitor.Mem_UseRlcBusy, 4065 activity_monitor.Mem_MinActiveFreqType, 4066 activity_monitor.Mem_MinActiveFreq, 4067 activity_monitor.Mem_BoosterFreqType, 4068 activity_monitor.Mem_BoosterFreq, 4069 activity_monitor.Mem_PD_Data_limit_c, 4070 activity_monitor.Mem_PD_Data_error_coeff, 4071 activity_monitor.Mem_PD_Data_error_rate_coeff); 4072 4073 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", 4074 " ", 4075 3, 4076 "FCLK", 4077 activity_monitor.Fclk_FPS, 4078 activity_monitor.Fclk_UseRlcBusy, 4079 activity_monitor.Fclk_MinActiveFreqType, 4080 activity_monitor.Fclk_MinActiveFreq, 4081 activity_monitor.Fclk_BoosterFreqType, 4082 activity_monitor.Fclk_BoosterFreq, 4083 activity_monitor.Fclk_PD_Data_limit_c, 4084 activity_monitor.Fclk_PD_Data_error_coeff, 4085 activity_monitor.Fclk_PD_Data_error_rate_coeff); 4086 } 4087 4088 return size; 4089 } 4090 4091 static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size) 4092 { 4093 DpmActivityMonitorCoeffInt_t activity_monitor; 4094 int workload_type, result = 0; 4095 uint32_t power_profile_mode = input[size]; 4096 4097 if (power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { 4098 pr_err("Invalid power profile mode %d\n", power_profile_mode); 4099 return -EINVAL; 4100 } 4101 4102 if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { 4103 struct vega20_hwmgr *data = 4104 (struct vega20_hwmgr *)(hwmgr->backend); 4105 4106 if (size != 10 && size != 0) 4107 return -EINVAL; 4108 4109 if (size == 0 && !data->is_custom_profile_set) 4110 return -EINVAL; 4111 4112 result = vega20_get_activity_monitor_coeff(hwmgr, 4113 (uint8_t *)(&activity_monitor), 4114 WORKLOAD_PPLIB_CUSTOM_BIT); 4115 PP_ASSERT_WITH_CODE(!result, 4116 "[SetPowerProfile] Failed to get activity monitor!", 4117 return result); 4118 4119 /* If size==0, then we want to apply the already-configured 4120 * CUSTOM profile again. Just apply it, since we checked its 4121 * validity above 4122 */ 4123 if (size == 0) 4124 goto out; 4125 4126 switch (input[0]) { 4127 case 0: /* Gfxclk */ 4128 activity_monitor.Gfx_FPS = input[1]; 4129 activity_monitor.Gfx_UseRlcBusy = input[2]; 4130 activity_monitor.Gfx_MinActiveFreqType = input[3]; 4131 activity_monitor.Gfx_MinActiveFreq = input[4]; 4132 activity_monitor.Gfx_BoosterFreqType = input[5]; 4133 activity_monitor.Gfx_BoosterFreq = input[6]; 4134 activity_monitor.Gfx_PD_Data_limit_c = input[7]; 4135 activity_monitor.Gfx_PD_Data_error_coeff = input[8]; 4136 activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9]; 4137 break; 4138 case 1: /* Socclk */ 4139 activity_monitor.Soc_FPS = input[1]; 4140 activity_monitor.Soc_UseRlcBusy = input[2]; 4141 activity_monitor.Soc_MinActiveFreqType = input[3]; 4142 activity_monitor.Soc_MinActiveFreq = input[4]; 4143 activity_monitor.Soc_BoosterFreqType = input[5]; 4144 activity_monitor.Soc_BoosterFreq = input[6]; 4145 activity_monitor.Soc_PD_Data_limit_c = input[7]; 4146 activity_monitor.Soc_PD_Data_error_coeff = input[8]; 4147 activity_monitor.Soc_PD_Data_error_rate_coeff = input[9]; 4148 break; 4149 case 2: /* Uclk */ 4150 activity_monitor.Mem_FPS = input[1]; 4151 activity_monitor.Mem_UseRlcBusy = input[2]; 4152 activity_monitor.Mem_MinActiveFreqType = input[3]; 4153 activity_monitor.Mem_MinActiveFreq = input[4]; 4154 activity_monitor.Mem_BoosterFreqType = input[5]; 4155 activity_monitor.Mem_BoosterFreq = input[6]; 4156 activity_monitor.Mem_PD_Data_limit_c = input[7]; 4157 activity_monitor.Mem_PD_Data_error_coeff = input[8]; 4158 activity_monitor.Mem_PD_Data_error_rate_coeff = input[9]; 4159 break; 4160 case 3: /* Fclk */ 4161 activity_monitor.Fclk_FPS = input[1]; 4162 activity_monitor.Fclk_UseRlcBusy = input[2]; 4163 activity_monitor.Fclk_MinActiveFreqType = input[3]; 4164 activity_monitor.Fclk_MinActiveFreq = input[4]; 4165 activity_monitor.Fclk_BoosterFreqType = input[5]; 4166 activity_monitor.Fclk_BoosterFreq = input[6]; 4167 activity_monitor.Fclk_PD_Data_limit_c = input[7]; 4168 activity_monitor.Fclk_PD_Data_error_coeff = input[8]; 4169 activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9]; 4170 break; 4171 default: 4172 return -EINVAL; 4173 } 4174 4175 result = vega20_set_activity_monitor_coeff(hwmgr, 4176 (uint8_t *)(&activity_monitor), 4177 WORKLOAD_PPLIB_CUSTOM_BIT); 4178 data->is_custom_profile_set = true; 4179 PP_ASSERT_WITH_CODE(!result, 4180 "[SetPowerProfile] Failed to set activity monitor!", 4181 return result); 4182 } 4183 4184 out: 4185 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 4186 workload_type = 4187 conv_power_profile_to_pplib_workload(power_profile_mode); 4188 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask, 4189 1 << workload_type, 4190 NULL); 4191 4192 hwmgr->power_profile_mode = power_profile_mode; 4193 4194 return 0; 4195 } 4196 4197 static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr, 4198 uint32_t virtual_addr_low, 4199 uint32_t virtual_addr_hi, 4200 uint32_t mc_addr_low, 4201 uint32_t mc_addr_hi, 4202 uint32_t size) 4203 { 4204 smum_send_msg_to_smc_with_parameter(hwmgr, 4205 PPSMC_MSG_SetSystemVirtualDramAddrHigh, 4206 virtual_addr_hi, 4207 NULL); 4208 smum_send_msg_to_smc_with_parameter(hwmgr, 4209 PPSMC_MSG_SetSystemVirtualDramAddrLow, 4210 virtual_addr_low, 4211 NULL); 4212 smum_send_msg_to_smc_with_parameter(hwmgr, 4213 PPSMC_MSG_DramLogSetDramAddrHigh, 4214 mc_addr_hi, 4215 NULL); 4216 4217 smum_send_msg_to_smc_with_parameter(hwmgr, 4218 PPSMC_MSG_DramLogSetDramAddrLow, 4219 mc_addr_low, 4220 NULL); 4221 4222 smum_send_msg_to_smc_with_parameter(hwmgr, 4223 PPSMC_MSG_DramLogSetDramSize, 4224 size, 4225 NULL); 4226 return 0; 4227 } 4228 4229 static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, 4230 struct PP_TemperatureRange *thermal_data) 4231 { 4232 struct phm_ppt_v3_information *pptable_information = 4233 (struct phm_ppt_v3_information *)hwmgr->pptable; 4234 struct vega20_hwmgr *data = 4235 (struct vega20_hwmgr *)(hwmgr->backend); 4236 PPTable_t *pp_table = &(data->smc_state_table.pp_table); 4237 4238 memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange)); 4239 4240 thermal_data->max = pp_table->TedgeLimit * 4241 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 4242 thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) * 4243 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 4244 thermal_data->hotspot_crit_max = pp_table->ThotspotLimit * 4245 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 4246 thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) * 4247 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 4248 thermal_data->mem_crit_max = pp_table->ThbmLimit * 4249 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 4250 thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)* 4251 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 4252 thermal_data->sw_ctf_threshold = pptable_information->us_software_shutdown_temp * 4253 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 4254 4255 return 0; 4256 } 4257 4258 static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire) 4259 { 4260 int res; 4261 4262 /* I2C bus access can happen very early, when SMU not loaded yet */ 4263 if (!vega20_is_smc_ram_running(hwmgr)) 4264 return 0; 4265 4266 res = smum_send_msg_to_smc_with_parameter(hwmgr, 4267 (acquire ? 4268 PPSMC_MSG_RequestI2CBus : 4269 PPSMC_MSG_ReleaseI2CBus), 4270 0, 4271 NULL); 4272 4273 PP_ASSERT_WITH_CODE(!res, "[SmuI2CAccessBus] Failed to access bus!", return res); 4274 return res; 4275 } 4276 4277 static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr, 4278 enum pp_df_cstate state) 4279 { 4280 int ret; 4281 4282 /* PPSMC_MSG_DFCstateControl is supported with 40.50 and later fws */ 4283 if (hwmgr->smu_version < 0x283200) { 4284 pr_err("Df cstate control is supported with 40.50 and later SMC fw!\n"); 4285 return -EINVAL; 4286 } 4287 4288 ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state, 4289 NULL); 4290 if (ret) 4291 pr_err("SetDfCstate failed!\n"); 4292 4293 return ret; 4294 } 4295 4296 static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr, 4297 uint32_t pstate) 4298 { 4299 int ret; 4300 4301 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 4302 PPSMC_MSG_SetXgmiMode, 4303 pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3, 4304 NULL); 4305 if (ret) 4306 pr_err("SetXgmiPstate failed!\n"); 4307 4308 return ret; 4309 } 4310 4311 static void vega20_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics) 4312 { 4313 memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0)); 4314 4315 gpu_metrics->common_header.structure_size = 4316 sizeof(struct gpu_metrics_v1_0); 4317 gpu_metrics->common_header.format_revision = 1; 4318 gpu_metrics->common_header.content_revision = 0; 4319 4320 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 4321 } 4322 4323 static ssize_t vega20_get_gpu_metrics(struct pp_hwmgr *hwmgr, 4324 void **table) 4325 { 4326 struct vega20_hwmgr *data = 4327 (struct vega20_hwmgr *)(hwmgr->backend); 4328 struct gpu_metrics_v1_0 *gpu_metrics = 4329 &data->gpu_metrics_table; 4330 SmuMetrics_t metrics; 4331 uint32_t fan_speed_rpm; 4332 int ret; 4333 4334 ret = vega20_get_metrics_table(hwmgr, &metrics, true); 4335 if (ret) 4336 return ret; 4337 4338 vega20_init_gpu_metrics_v1_0(gpu_metrics); 4339 4340 gpu_metrics->temperature_edge = metrics.TemperatureEdge; 4341 gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot; 4342 gpu_metrics->temperature_mem = metrics.TemperatureHBM; 4343 gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx; 4344 gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc; 4345 gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0; 4346 4347 gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity; 4348 gpu_metrics->average_umc_activity = metrics.AverageUclkActivity; 4349 4350 gpu_metrics->average_socket_power = metrics.AverageSocketPower; 4351 4352 gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency; 4353 gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency; 4354 gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency; 4355 4356 gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK]; 4357 gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK]; 4358 gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK]; 4359 gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK]; 4360 gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK]; 4361 4362 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 4363 4364 vega20_fan_ctrl_get_fan_speed_rpm(hwmgr, &fan_speed_rpm); 4365 gpu_metrics->current_fan_speed = (uint16_t)fan_speed_rpm; 4366 4367 gpu_metrics->pcie_link_width = 4368 vega20_get_current_pcie_link_width(hwmgr); 4369 gpu_metrics->pcie_link_speed = 4370 vega20_get_current_pcie_link_speed(hwmgr); 4371 4372 *table = (void *)gpu_metrics; 4373 4374 return sizeof(struct gpu_metrics_v1_0); 4375 } 4376 4377 static const struct pp_hwmgr_func vega20_hwmgr_funcs = { 4378 /* init/fini related */ 4379 .backend_init = vega20_hwmgr_backend_init, 4380 .backend_fini = vega20_hwmgr_backend_fini, 4381 .asic_setup = vega20_setup_asic_task, 4382 .power_off_asic = vega20_power_off_asic, 4383 .dynamic_state_management_enable = vega20_enable_dpm_tasks, 4384 .dynamic_state_management_disable = vega20_disable_dpm_tasks, 4385 /* power state related */ 4386 .apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules, 4387 .pre_display_config_changed = vega20_pre_display_configuration_changed_task, 4388 .display_config_changed = vega20_display_configuration_changed_task, 4389 .check_smc_update_required_for_display_configuration = 4390 vega20_check_smc_update_required_for_display_configuration, 4391 .notify_smc_display_config_after_ps_adjustment = 4392 vega20_notify_smc_display_config_after_ps_adjustment, 4393 /* export to DAL */ 4394 .get_sclk = vega20_dpm_get_sclk, 4395 .get_mclk = vega20_dpm_get_mclk, 4396 .get_dal_power_level = vega20_get_dal_power_level, 4397 .get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency, 4398 .get_clock_by_type_with_voltage = vega20_get_clock_by_type_with_voltage, 4399 .set_watermarks_for_clocks_ranges = vega20_set_watermarks_for_clocks_ranges, 4400 .display_clock_voltage_request = vega20_display_clock_voltage_request, 4401 .get_performance_level = vega20_get_performance_level, 4402 /* UMD pstate, profile related */ 4403 .force_dpm_level = vega20_dpm_force_dpm_level, 4404 .get_power_profile_mode = vega20_get_power_profile_mode, 4405 .set_power_profile_mode = vega20_set_power_profile_mode, 4406 /* od related */ 4407 .set_power_limit = vega20_set_power_limit, 4408 .get_sclk_od = vega20_get_sclk_od, 4409 .set_sclk_od = vega20_set_sclk_od, 4410 .get_mclk_od = vega20_get_mclk_od, 4411 .set_mclk_od = vega20_set_mclk_od, 4412 .odn_edit_dpm_table = vega20_odn_edit_dpm_table, 4413 /* for sysfs to retrive/set gfxclk/memclk */ 4414 .force_clock_level = vega20_force_clock_level, 4415 .print_clock_levels = vega20_print_clock_levels, 4416 .read_sensor = vega20_read_sensor, 4417 .get_ppfeature_status = vega20_get_ppfeature_status, 4418 .set_ppfeature_status = vega20_set_ppfeature_status, 4419 /* powergate related */ 4420 .powergate_uvd = vega20_power_gate_uvd, 4421 .powergate_vce = vega20_power_gate_vce, 4422 /* thermal related */ 4423 .start_thermal_controller = vega20_start_thermal_controller, 4424 .stop_thermal_controller = vega20_thermal_stop_thermal_controller, 4425 .get_thermal_temperature_range = vega20_get_thermal_temperature_range, 4426 .register_irq_handlers = smu9_register_irq_handlers, 4427 .disable_smc_firmware_ctf = vega20_thermal_disable_alert, 4428 /* fan control related */ 4429 .get_fan_speed_pwm = vega20_fan_ctrl_get_fan_speed_pwm, 4430 .set_fan_speed_pwm = vega20_fan_ctrl_set_fan_speed_pwm, 4431 .get_fan_speed_info = vega20_fan_ctrl_get_fan_speed_info, 4432 .get_fan_speed_rpm = vega20_fan_ctrl_get_fan_speed_rpm, 4433 .set_fan_speed_rpm = vega20_fan_ctrl_set_fan_speed_rpm, 4434 .get_fan_control_mode = vega20_get_fan_control_mode, 4435 .set_fan_control_mode = vega20_set_fan_control_mode, 4436 /* smu memory related */ 4437 .notify_cac_buffer_info = vega20_notify_cac_buffer_info, 4438 .enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost, 4439 /* BACO related */ 4440 .get_bamaco_support = vega20_get_bamaco_support, 4441 .get_asic_baco_state = vega20_baco_get_state, 4442 .set_asic_baco_state = vega20_baco_set_state, 4443 .set_mp1_state = vega20_set_mp1_state, 4444 .smu_i2c_bus_access = vega20_smu_i2c_bus_access, 4445 .set_df_cstate = vega20_set_df_cstate, 4446 .set_xgmi_pstate = vega20_set_xgmi_pstate, 4447 .get_gpu_metrics = vega20_get_gpu_metrics, 4448 }; 4449 4450 int vega20_hwmgr_init(struct pp_hwmgr *hwmgr) 4451 { 4452 hwmgr->hwmgr_func = &vega20_hwmgr_funcs; 4453 hwmgr->pptable_func = &vega20_pptable_funcs; 4454 4455 return 0; 4456 } 4457