/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "hwmgr.h"
#include "amd_powerplay.h"
#include "vega20_smumgr.h"
#include "hardwaremanager.h"
#include "ppatomfwctrl.h"
#include "atomfirmware.h"
#include "cgs_common.h"
#include "vega20_powertune.h"
#include "vega20_inc.h"
#include "pppcielanes.h"
#include "vega20_hwmgr.h"
#include "vega20_processpptables.h"
#include "vega20_pptable.h"
#include "vega20_thermal.h"
#include "vega20_ppsmc.h"
#include "pp_debug.h"
#include "amd_pcie_helpers.h"
#include "ppinterrupt.h"
#include "pp_overdriver.h"
#include "pp_thermal.h"
#include "soc15_common.h"
#include "vega20_baco.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#include "nbio/nbio_7_4_sh_mask.h"

#define smnPCIE_LC_SPEED_CNTL			0x11140290
#define smnPCIE_LC_LINK_WIDTH_CNTL		0x11140288

#define LINK_WIDTH_MAX				6
#define LINK_SPEED_MAX				3
static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
static const int link_speed[] = {25, 50, 80, 160};

static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);

	data->gfxclk_average_alpha = PPVEGA20_VEGA20GFXCLKAVERAGEALPHA_DFLT;
	data->socclk_average_alpha = PPVEGA20_VEGA20SOCCLKAVERAGEALPHA_DFLT;
	data->uclk_average_alpha = PPVEGA20_VEGA20UCLKCLKAVERAGEALPHA_DFLT;
	data->gfx_activity_average_alpha = PPVEGA20_VEGA20GFXACTIVITYAVERAGEALPHA_DFLT;
	data->lowest_uclk_reserved_for_ulv = PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT;

	data->display_voltage_mode = PPVEGA20_VEGA20DISPLAYVOLTAGEMODE_DFLT;
	data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_c =
			PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;

	/*
	 * Disable the following features for now:
	 *   GFXCLK DS
	 *   SOCCLK DS
	 *   LCLK DS
	 *   DCEFCLK DS
	 *   FCLK DS
	 *   MP1CLK DS
	 *   MP0CLK DS
	 */
	data->registry_data.disallowed_features = 0xE0041C00;
	/* ECC feature should be disabled on old SMUs */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version);
	if (hwmgr->smu_version < 0x282100)
		data->registry_data.disallowed_features |= FEATURE_ECC_MASK;

	if (!(hwmgr->feature_mask & PP_PCIE_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_LINK_MASK;

	if (!(hwmgr->feature_mask & PP_SCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_GFXCLK_MASK;

	if (!(hwmgr->feature_mask & PP_SOCCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_SOCCLK_MASK;

	if (!(hwmgr->feature_mask & PP_MCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_UCLK_MASK;

	if (!(hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_DCEFCLK_MASK;

	if (!(hwmgr->feature_mask & PP_ULV_MASK))
		data->registry_data.disallowed_features |= FEATURE_ULV_MASK;

	if (!(hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK))
		data->registry_data.disallowed_features |= FEATURE_DS_GFXCLK_MASK;

	data->registry_data.od_state_in_dc_support = 0;
	data->registry_data.thermal_support = 1;
	data->registry_data.skip_baco_hardware = 0;

	data->registry_data.log_avfs_param = 0;
	data->registry_data.sclk_throttle_low_notification = 1;
	data->registry_data.force_dpm_high = 0;
	data->registry_data.stable_pstate_sclk_dpm_percentage = 75;

	data->registry_data.didt_support = 0;
	if (data->registry_data.didt_support) {
		data->registry_data.didt_mode = 6;
		data->registry_data.sq_ramping_support = 1;
		data->registry_data.db_ramping_support = 0;
		data->registry_data.td_ramping_support = 0;
		data->registry_data.tcp_ramping_support = 0;
		data->registry_data.dbr_ramping_support = 0;
		data->registry_data.edc_didt_support = 1;
		data->registry_data.gc_didt_support = 0;
		data->registry_data.psm_didt_support = 0;
	}

	data->registry_data.pcie_lane_override = 0xff;
	data->registry_data.pcie_speed_override = 0xff;
	data->registry_data.pcie_clock_override = 0xffffffff;
	data->registry_data.regulator_hot_gpio_support = 1;
	data->registry_data.ac_dc_switch_gpio_support = 0;
	data->registry_data.quick_transition_support = 0;
	data->registry_data.zrpm_start_temp = 0xffff;
	data->registry_data.zrpm_stop_temp = 0xffff;
	data->registry_data.od8_feature_enable = 1;
	data->registry_data.disable_water_mark = 0;
	data->registry_data.disable_pp_tuning = 0;
	data->registry_data.disable_xlpp_tuning = 0;
	data->registry_data.disable_workload_policy = 0;
	data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F;
	data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919;
	data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A;
	data->registry_data.force_workload_policy_mask = 0;
	data->registry_data.disable_3d_fs_detection = 0;
	data->registry_data.fps_support = 1;
	data->registry_data.disable_auto_wattman = 1;
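	/* Auto Wattman is disabled above; keep its tuning knobs at defaults */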
	data->registry_data.auto_wattman_debug = 0;
	data->registry_data.auto_wattman_sample_period = 100;
	data->registry_data.fclk_gfxclk_ratio = 0;
	data->registry_data.auto_wattman_threshold = 50;
	data->registry_data.gfxoff_controlled_by_driver = 1;
	data->gfxoff_allowed = false;
	data->counter_gfxoff = 0;
	data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
}

static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;

	if (data->vddci_control == VEGA20_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TablelessHardwareInterface);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_BACO);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableSMU7ThermalManagement);

	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDPowerGating);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_VCEPowerGating);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UnTabledHardwareInterface);

	if (data->registry_data.od8_feature_enable)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_OD8inACSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ActivityReporting);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_FanSpeedInTableIsRPM);

	if (data->registry_data.od_state_in_dc_support) {
		if (data->registry_data.od8_feature_enable)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_OD8inDCSupport);
	}

	if (data->registry_data.thermal_support &&
	    data->registry_data.fuzzy_fan_control_support &&
	    hwmgr->thermal_controller.advanceFanControlParameters.usTMax)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ODFuzzyFanControlSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPowerManagement);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SMC);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalPolicyDelay);

	if (data->registry_data.force_dpm_high)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ExclusiveModeAlwaysHigh);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicUVDState);

	if (data->registry_data.sclk_throttle_low_notification)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SclkThrottleLowNotification);

	/* power tune caps */
	/* assume disabled */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtSupport);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtEDCEnable);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_GCEDC);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PSM);

	if (data->registry_data.didt_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_DiDtSupport);
		if (data->registry_data.sq_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_SQRamping);
		if (data->registry_data.db_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DBRamping);
		if (data->registry_data.td_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_TDRamping);
		if (data->registry_data.tcp_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_TCPRamping);
		if (data->registry_data.dbr_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DBRRamping);
		if (data->registry_data.edc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DiDtEDCEnable);
		if (data->registry_data.gc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_GCEDC);
		if (data->registry_data.psm_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PSM);
	}

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot);

	if (data->registry_data.ac_dc_switch_gpio_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
	}

	if (data->registry_data.quick_transition_support) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition);
	}

	if (data->lowest_uclk_reserved_for_ulv != PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_LowestUclkReservedForUlv);
		if (data->lowest_uclk_reserved_for_ulv == 1)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_LowestUclkReservedForUlv);
	}

	if (data->registry_data.custom_fan_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_CustomFanControlSupport);

	return 0;
}

static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t top32, bottom32;
	int i;

	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
			FEATURE_DPM_PREFETCHER_BIT;
	data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
			FEATURE_DPM_GFXCLK_BIT;
	data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
			FEATURE_DPM_UCLK_BIT;
	data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
			FEATURE_DPM_SOCCLK_BIT;
	data->smu_features[GNLD_DPM_UVD].smu_feature_id =
			FEATURE_DPM_UVD_BIT;
	data->smu_features[GNLD_DPM_VCE].smu_feature_id =
			FEATURE_DPM_VCE_BIT;
	data->smu_features[GNLD_ULV].smu_feature_id =
			FEATURE_ULV_BIT;
	data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
			FEATURE_DPM_MP0CLK_BIT;
	data->smu_features[GNLD_DPM_LINK].smu_feature_id =
			FEATURE_DPM_LINK_BIT;
	data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
			FEATURE_DPM_DCEFCLK_BIT;
	data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
			FEATURE_DS_GFXCLK_BIT;
	data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
			FEATURE_DS_SOCCLK_BIT;
	data->smu_features[GNLD_DS_LCLK].smu_feature_id =
			FEATURE_DS_LCLK_BIT;
	data->smu_features[GNLD_PPT].smu_feature_id =
			FEATURE_PPT_BIT;
	data->smu_features[GNLD_TDC].smu_feature_id =
			FEATURE_TDC_BIT;
	data->smu_features[GNLD_THERMAL].smu_feature_id =
			FEATURE_THERMAL_BIT;
	data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
			FEATURE_GFX_PER_CU_CG_BIT;
	data->smu_features[GNLD_RM].smu_feature_id =
			FEATURE_RM_BIT;
	data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
			FEATURE_DS_DCEFCLK_BIT;
	data->smu_features[GNLD_ACDC].smu_feature_id =
			FEATURE_ACDC_BIT;
	data->smu_features[GNLD_VR0HOT].smu_feature_id =
			FEATURE_VR0HOT_BIT;
	data->smu_features[GNLD_VR1HOT].smu_feature_id =
			FEATURE_VR1HOT_BIT;
	data->smu_features[GNLD_FW_CTF].smu_feature_id =
			FEATURE_FW_CTF_BIT;
	data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
			FEATURE_LED_DISPLAY_BIT;
	data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
			FEATURE_FAN_CONTROL_BIT;
	data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
	data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT;
	data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT;
	data->smu_features[GNLD_DPM_FCLK].smu_feature_id = FEATURE_DPM_FCLK_BIT;
	data->smu_features[GNLD_DS_FCLK].smu_feature_id = FEATURE_DS_FCLK_BIT;
	data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
	data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
	data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
	data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT;
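
	/*
	 * Build each feature's 64-bit bitmap from its SMU feature id and mark
	 * it allowed unless the matching registry disallowed_features bit is set.
	 */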
	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		data->smu_features[i].smu_feature_bitmap =
			(uint64_t)(1ULL << data->smu_features[i].smu_feature_id);
		data->smu_features[i].allowed =
			((data->registry_data.disallowed_features >> i) & 1) ?
			false : true;
	}

	/* Get the SN to turn into a Unique ID */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);

	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}

static int vega20_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static int vega20_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	kfree(hwmgr->backend);
	hwmgr->backend = NULL;

	return 0;
}

static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data;
	struct amdgpu_device *adev = hwmgr->adev;

	data = kzalloc(sizeof(struct vega20_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;

	hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	vega20_set_default_registry_data(hwmgr);

	data->disable_dpm_mask = 0xff;

	/* need to set voltage control types before EVV patching */
	data->vddc_control = VEGA20_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = VEGA20_VOLTAGE_CONTROL_NONE;
	data->vddci_control = VEGA20_VOLTAGE_CONTROL_NONE;

	data->water_marks_bitmap = 0;
	data->avfs_exist = false;

	vega20_set_features_platform_caps(hwmgr);

	vega20_init_dpm_defaults(hwmgr);

	/* Parse pptable data read from VBIOS */
	vega20_set_private_data_based_on_pptable(hwmgr);

	data->is_tlu_enabled = false;

	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
			VEGA20_MAX_HARDWARE_POWERLEVELS;
	hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

	hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
	hwmgr->platform_descriptor.clockStep.engineClock = 500;
	hwmgr->platform_descriptor.clockStep.memoryClock = 500;

	data->total_active_cus = adev->gfx.cu_info.number;
	data->is_custom_profile_set = false;

	return 0;
}

static int vega20_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);

	data->low_sclk_interrupt_threshold = 0;

	return 0;
}

static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
	int ret = 0;
	bool use_baco = (amdgpu_in_reset(adev) &&
			 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
			(adev->in_runpm && amdgpu_asic_supports_baco(adev));

	ret = vega20_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to init sclk threshold!",
			return ret);

	if (use_baco) {
		ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
		if (ret)
			pr_err("Failed to apply vega20 baco workaround!\n");
	}

	return ret;
}

/*
 * @fn vega20_init_dpm_state
 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
 *
 * @param dpm_state - the address of the DPM Table to initialize.
 * @return None.
 */
static void vega20_init_dpm_state(struct vega20_dpm_state *dpm_state)
{
	dpm_state->soft_min_level = 0x0;
	dpm_state->soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_state->hard_min_level = 0x0;
	dpm_state->hard_max_level = VG20_CLOCK_MAX_DEFAULT;
}

static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
		PPCLK_e clk_id, uint32_t *num_of_levels)
{
	int ret = 0;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetDpmFreqByIndex,
			(clk_id << 16 | 0xFF),
			num_of_levels);
	PP_ASSERT_WITH_CODE(!ret,
			"[GetNumOfDpmLevel] failed to get dpm levels!",
			return ret);

	return ret;
}

static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
		PPCLK_e clk_id, uint32_t index, uint32_t *clk)
{
	int ret = 0;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetDpmFreqByIndex,
			(clk_id << 16 | index),
			clk);
	PP_ASSERT_WITH_CODE(!ret,
			"[GetDpmFreqByIndex] failed to get dpm freq by index!",
			return ret);

	return ret;
}

static int vega20_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
		struct vega20_single_dpm_table *dpm_table, PPCLK_e clk_id)
{
	int ret = 0;
	uint32_t i, num_of_levels, clk;

	ret = vega20_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetupSingleDpmTable] failed to get clk levels!",
			return ret);

	dpm_table->count = num_of_levels;

	for (i = 0; i < num_of_levels; i++) {
		ret = vega20_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupSingleDpmTable] failed to get clk of specific level!",
				return ret);
		dpm_table->dpm_levels[i].value = clk;
		dpm_table->dpm_levels[i].enabled = true;
	}

	return ret;
}

static int vega20_setup_gfxclk_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table;
	int ret = 0;

	dpm_table = &(data->dpm_table.gfx_table);
	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
	}

	return ret;
}

static int vega20_setup_memclk_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table;
	int ret = 0;

	dpm_table = &(data->dpm_table.mem_table);
	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get memclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
	}

	return ret;
}

/*
 * This function initializes all DPM state tables
 * for the SMU based on the dependency table.
 * The dynamic state patching function will then trim these
 * state tables to the allowed range based
 * on the power policy or external client requests,
 * such as UVD requests, etc.
 */
static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table;
	int ret = 0;

	memset(&data->dpm_table, 0, sizeof(data->dpm_table));

	/* socclk */
	dpm_table = &(data->dpm_table.soc_table);
	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get socclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* gfxclk */
	dpm_table = &(data->dpm_table.gfx_table);
	ret = vega20_setup_gfxclk_dpm_table(hwmgr);
	if (ret)
		return ret;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* memclk */
	dpm_table = &(data->dpm_table.mem_table);
	ret = vega20_setup_memclk_dpm_table(hwmgr);
	if (ret)
		return ret;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* eclk */
	dpm_table = &(data->dpm_table.eclk_table);
	if (data->smu_features[GNLD_DPM_VCE].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get eclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* vclk */
	dpm_table = &(data->dpm_table.vclk_table);
	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get vclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* dclk */
	dpm_table = &(data->dpm_table.dclk_table);
	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* dcefclk */
	dpm_table = &(data->dpm_table.dcef_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* pixclk */
	dpm_table = &(data->dpm_table.pixel_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* dispclk */
	dpm_table = &(data->dpm_table.display_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* phyclk */
	dpm_table = &(data->dpm_table.phy_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* fclk */
	dpm_table = &(data->dpm_table.fclk_table);
	if (data->smu_features[GNLD_DPM_FCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_FCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get fclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.fclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* save a copy of the default DPM table */
	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
			sizeof(struct vega20_dpm_table));

	return 0;
}

/**
 * vega20_init_smc_table - Initializes the SMC table and uploads it
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * return: always 0
 */
static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;

	result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
	PP_ASSERT_WITH_CODE(!result,
			"[InitSMCTable] Failed to get vbios bootup values!",
			return result);

	data->vbios_boot_state.vddc = boot_up_values.usVddc;
	data->vbios_boot_state.vddci = boot_up_values.usVddci;
	data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
	data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
	data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
	data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
	data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
	data->vbios_boot_state.eclock = boot_up_values.ulEClk;
	data->vbios_boot_state.vclock = boot_up_values.ulVClk;
	data->vbios_boot_state.dclock = boot_up_values.ulDClk;
	data->vbios_boot_state.fclock = boot_up_values.ulFClk;
	data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;

	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetMinDeepSleepDcefclk,
			(uint32_t)(data->vbios_boot_state.dcef_clock / 100),
			NULL);

	memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));

	result = smum_smc_table_manager(hwmgr,
			(uint8_t *)pp_table, TABLE_PPTABLE, false);
	PP_ASSERT_WITH_CODE(!result,
			"[InitSMCTable] Failed to upload PPtable!",
			return result);

	return 0;
}

/*
 * Override PCIe link speed and link width for DPM Level 1. PPTable entries
 * reflect the ASIC capabilities and not the system capabilities. For example,
 * with a Vega20 board in a PCIe Gen3 system, when the SMU tries to switch to
 * DPM1 it fails, as the system doesn't support Gen4.
 */
static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	int i;
	int ret;

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	for (i = 0; i < NUM_LINK_LEVELS; i++) {
		pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen :
			pp_table->PcieGenSpeed[i];
		pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ?
			pcie_width : pp_table->PcieLaneCount[i];

		if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg !=
		    pp_table->PcieLaneCount[i]) {
			smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg;
			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
					NULL);
			PP_ASSERT_WITH_CODE(!ret,
				"[OverridePcieParameters] Attempt to override pcie params failed!",
				return ret);
		}

		/* update the pptable */
		pp_table->PcieGenSpeed[i] = pcie_gen_arg;
		pp_table->PcieLaneCount[i] = pcie_width_arg;
	}

	/* override to the highest if it's disabled from ppfeaturemask */
	if (data->registry_data.pcie_dpm_key_disabled) {
		for (i = 0; i < NUM_LINK_LEVELS; i++) {
			smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
					NULL);
			PP_ASSERT_WITH_CODE(!ret,
				"[OverridePcieParameters] Attempt to override pcie params failed!",
				return ret);

			pp_table->PcieGenSpeed[i] = pcie_gen;
			pp_table->PcieLaneCount[i] = pcie_width;
		}
		ret = vega20_enable_smc_features(hwmgr,
				false,
				data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
		PP_ASSERT_WITH_CODE(!ret,
				"Attempt to Disable DPM LINK Failed!",
				return ret);
		data->smu_features[GNLD_DPM_LINK].enabled = false;
		data->smu_features[GNLD_DPM_LINK].supported = false;
	}

	return 0;
}
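
/*
 * Fold the per-feature bitmaps of all allowed features into the low/high
 * 32-bit masks expected by the SetAllowedFeaturesMask SMU messages.
 */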
static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint32_t allowed_features_low = 0, allowed_features_high = 0;
	int i;
	int ret = 0;

	for (i = 0; i < GNLD_FEATURES_MAX; i++)
		if (data->smu_features[i].allowed)
			data->smu_features[i].smu_feature_id > 31 ?
				(allowed_features_high |=
					((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_HIGH_SHIFT)
					 & 0xFFFFFFFF)) :
				(allowed_features_low |=
					((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT)
					 & 0xFFFFFFFF));

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
		PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high, NULL);
	PP_ASSERT_WITH_CODE(!ret,
		"[SetAllowedFeaturesMask] Attempt to set allowed features mask(high) failed!",
		return ret);

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
		PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low, NULL);
	PP_ASSERT_WITH_CODE(!ret,
		"[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
		return ret);

	return 0;
}

static int vega20_run_btc(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc, NULL);
}

static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc, NULL);
}

static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint64_t features_enabled;
	int i;
	bool enabled;
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_EnableAllSmuFeatures,
			NULL)) == 0,
			"[EnableAllSMUFeatures] Failed to enable all smu features!",
			return ret);

	ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
	PP_ASSERT_WITH_CODE(!ret,
			"[EnableAllSmuFeatures] Failed to get enabled smc features!",
			return ret);

	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
			true : false;
		data->smu_features[i].enabled = enabled;
		data->smu_features[i].supported = enabled;

#if 0
		if (data->smu_features[i].allowed && !enabled)
			pr_info("[EnableAllSMUFeatures] feature %d is expected enabled!", i);
		else if (!data->smu_features[i].allowed && enabled)
			pr_info("[EnableAllSMUFeatures] feature %d is expected disabled!", i);
#endif
	}

	return 0;
}

static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);

	if (data->smu_features[GNLD_DPM_UCLK].enabled)
		return smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetUclkFastSwitch,
			1,
			NULL);

	return 0;
}

static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);

	return smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetFclkGfxClkRatio,
			data->registry_data.fclk_gfxclk_ratio,
			NULL);
}

static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	int i, ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_DisableAllSmuFeatures,
			NULL)) == 0,
			"[DisableAllSMUFeatures] Failed to disable all smu features!",
			return ret);

	for (i = 0; i < GNLD_FEATURES_MAX; i++)
		data->smu_features[i].enabled = 0;

	return 0;
}

static int vega20_od8_set_feature_capabilities(
		struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega20_od8_settings *od_settings = &(data->od8_settings);

	od_settings->overdrive8_capabilities = 0;

	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_LIMITS] &&
		    pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] > 0 &&
		    pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] >=
		     pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN]))
			od_settings->overdrive8_capabilities |= OD8_GFXCLK_LIMITS;

		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_CURVE] &&
		    (pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] >=
		     pp_table->MinVoltageGfx / VOLTAGE_SCALE) &&
		    (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] <=
		     pp_table->MaxVoltageGfx / VOLTAGE_SCALE) &&
		    (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] >=
		     pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1]))
			od_settings->overdrive8_capabilities |= OD8_GFXCLK_CURVE;
	}
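
	/* The UCLK Fmax OD minimum is taken from the second highest default UCLK level */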
	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] =
			data->dpm_table.mem_table.dpm_levels[data->dpm_table.mem_table.count - 2].value;
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] &&
		    pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 &&
		    pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] >=
		     pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX]))
			od_settings->overdrive8_capabilities |= OD8_UCLK_MAX;
	}

	if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_POWER_LIMIT] &&
	    pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
	    pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] <= 100 &&
	    pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
	    pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] <= 100)
		od_settings->overdrive8_capabilities |= OD8_POWER_LIMIT;

	if (data->smu_features[GNLD_FAN_CONTROL].enabled) {
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ACOUSTIC_LIMIT] &&
		    pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
		    pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] >=
		     pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT]))
			od_settings->overdrive8_capabilities |= OD8_ACOUSTIC_LIMIT_SCLK;

		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_SPEED_MIN] &&
		    (pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED] >=
		     (pp_table->FanPwmMin * pp_table->FanMaximumRpm / 100)) &&
		    pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] >=
		     pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED]))
			od_settings->overdrive8_capabilities |= OD8_FAN_SPEED_MIN;
	}

	if (data->smu_features[GNLD_THERMAL].enabled) {
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_FAN] &&
		    pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
		    pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] >=
		     pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP]))
			od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_FAN;

		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_SYSTEM] &&
		    pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
		    pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] >=
		     pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX]))
			od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_SYSTEM;
	}

	if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_MEMORY_TIMING_TUNE])
		od_settings->overdrive8_capabilities |= OD8_MEMORY_TIMING_TUNE;

	if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ZERO_RPM_CONTROL] &&
	    pp_table->FanZeroRpmEnable)
		od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL;

	if (!od_settings->overdrive8_capabilities)
		hwmgr->od_enabled = false;

	return 0;
}
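
/*
 * Tag each OD8 setting with the capability (feature id) that controls it,
 * or 0 when that capability is not exposed.
 */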
static int vega20_od8_set_feature_id(
		struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_od8_settings *od_settings = &(data->od8_settings);

	if (od_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
			OD8_GFXCLK_LIMITS;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
			OD8_GFXCLK_LIMITS;
	} else {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
			0;
	}

	if (od_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
			OD8_GFXCLK_CURVE;
	} else {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
			0;
	}

	if (od_settings->overdrive8_capabilities & OD8_UCLK_MAX)
		od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = OD8_UCLK_MAX;
	else
		od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = 0;

	if (od_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
		od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = OD8_POWER_LIMIT;
	else
		od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = 0;

	if (od_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
		od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
			OD8_ACOUSTIC_LIMIT_SCLK;
	else
		od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
			0;

	if (od_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
		od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
			OD8_FAN_SPEED_MIN;
	else
		od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
			0;

	if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
		od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
			OD8_TEMPERATURE_FAN;
	else
		od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
			0;

	if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
		od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
			OD8_TEMPERATURE_SYSTEM;
	else
		od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
			0;

	return 0;
}
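
/*
 * Query the SMU for the AVFS (hot curve) base voltage at the given gfxclk
 * frequency and scale the result down by VOLTAGE_SCALE.
 */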
static int vega20_od8_get_gfx_clock_base_voltage(
		struct pp_hwmgr *hwmgr,
		uint32_t *voltage,
		uint32_t freq)
{
	int ret = 0;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetAVFSVoltageByDpm,
			((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq),
			voltage);
	PP_ASSERT_WITH_CODE(!ret,
			"[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!",
			return ret);

	*voltage = *voltage / VOLTAGE_SCALE;

	return 0;
}

static int vega20_od8_initialize_default_settings(
		struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_od8_settings *od8_settings = &(data->od8_settings);
	OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table);
	int i, ret = 0;

	/* Set Feature Capabilities */
	vega20_od8_set_feature_capabilities(hwmgr);

	/* Map FeatureID to individual settings */
	vega20_od8_set_feature_id(hwmgr);

	/* Set default values */
	ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, true);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to export over drive table!",
			return ret);

	if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
			od_table->GfxclkFmin;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
			od_table->GfxclkFmax;
	} else {
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
			0;
	}

	if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
		od_table->GfxclkFreq1 = od_table->GfxclkFmin;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
			od_table->GfxclkFreq1;

		od_table->GfxclkFreq3 = od_table->GfxclkFmax;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
			od_table->GfxclkFreq3;

		od_table->GfxclkFreq2 = (od_table->GfxclkFreq1 + od_table->GfxclkFreq3) / 2;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
			od_table->GfxclkFreq2;

		PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
				&(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value),
				od_table->GfxclkFreq1),
				"[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
				od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value = 0);
		od_table->GfxclkVolt1 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value
			* VOLTAGE_SCALE;

		PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
				&(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value),
				od_table->GfxclkFreq2),
				"[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
				od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value = 0);
		od_table->GfxclkVolt2 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value
			* VOLTAGE_SCALE;

		PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
				&(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value),
				od_table->GfxclkFreq3),
				"[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
				od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value = 0);
		od_table->GfxclkVolt3 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value
			* VOLTAGE_SCALE;
	} else {
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value =
			0;
	}

	if (od8_settings->overdrive8_capabilities & OD8_UCLK_MAX)
		od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
			od_table->UclkFmax;
	else
		od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
		od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
			od_table->OverDrivePct;
	else
		od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
		od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
			od_table->FanMaximumRpm;
	else
		od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
		od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
			od_table->FanMinimumPwm * data->smc_state_table.pp_table.FanMaximumRpm / 100;
	else
		od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
		od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
			od_table->FanTargetTemperature;
	else
		od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
		od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
			od_table->MaxOpTemp;
	else
		od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
			0;

	for (i = 0; i < OD8_SETTING_COUNT; i++) {
		if (od8_settings->od8_settings_array[i].feature_id) {
			od8_settings->od8_settings_array[i].min_value =
				pptable_information->od_settings_min[i];
			od8_settings->od8_settings_array[i].max_value =
				pptable_information->od_settings_max[i];
			od8_settings->od8_settings_array[i].current_value =
				od8_settings->od8_settings_array[i].default_value;
		} else {
			od8_settings->od8_settings_array[i].min_value =
				0;
			od8_settings->od8_settings_array[i].max_value =
				0;
			od8_settings->od8_settings_array[i].current_value =
				0;
		}
	}

	ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, false);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to import over drive table!",
			return ret);

	return 0;
}
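
/*
 * Apply a single OD8 setting: export the overdrive table from the SMU,
 * patch the requested field and import the table back.
 */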
static int vega20_od8_set_settings(
		struct pp_hwmgr *hwmgr,
		uint32_t index,
		uint32_t value)
{
	OverDriveTable_t od_table;
	int ret = 0;
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_od8_single_setting *od8_settings =
			data->od8_settings.od8_settings_array;

	ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, true);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to export over drive table!",
			return ret);

	switch (index) {
	case OD8_SETTING_GFXCLK_FMIN:
		od_table.GfxclkFmin = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_FMAX:
		if (value < od8_settings[OD8_SETTING_GFXCLK_FMAX].min_value ||
		    value > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value)
			return -EINVAL;

		od_table.GfxclkFmax = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_FREQ1:
		od_table.GfxclkFreq1 = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_VOLTAGE1:
		od_table.GfxclkVolt1 = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_FREQ2:
		od_table.GfxclkFreq2 = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_VOLTAGE2:
		od_table.GfxclkVolt2 = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_FREQ3:
		od_table.GfxclkFreq3 = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_VOLTAGE3:
		od_table.GfxclkVolt3 = (uint16_t)value;
		break;
	case OD8_SETTING_UCLK_FMAX:
		if (value < od8_settings[OD8_SETTING_UCLK_FMAX].min_value ||
		    value > od8_settings[OD8_SETTING_UCLK_FMAX].max_value)
			return -EINVAL;
		od_table.UclkFmax = (uint16_t)value;
		break;
	case OD8_SETTING_POWER_PERCENTAGE:
		od_table.OverDrivePct = (int16_t)value;
		break;
	case OD8_SETTING_FAN_ACOUSTIC_LIMIT:
		od_table.FanMaximumRpm = (uint16_t)value;
		break;
	case OD8_SETTING_FAN_MIN_SPEED:
		od_table.FanMinimumPwm = (uint16_t)value;
		break;
	case OD8_SETTING_FAN_TARGET_TEMP:
		od_table.FanTargetTemperature = (uint16_t)value;
		break;
	case OD8_SETTING_OPERATING_TEMP_MAX:
		od_table.MaxOpTemp = (uint16_t)value;
		break;
	}

	ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, false);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to import over drive table!",
			return ret);

	return 0;
}

static int vega20_get_sclk_od(
		struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = hwmgr->backend;
	struct vega20_single_dpm_table *sclk_table =
			&(data->dpm_table.gfx_table);
	struct vega20_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.gfx_table);
	int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
	int golden_value = golden_sclk_table->dpm_levels
			[golden_sclk_table->count - 1].value;

	/* od percentage */
	value -= golden_value;
	value = DIV_ROUND_UP(value * 100, golden_value);

	return value;
}

static int vega20_set_sclk_od(
		struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct vega20_hwmgr *data = hwmgr->backend;
	struct vega20_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.gfx_table);
	uint32_t od_sclk;
	int ret = 0;

	od_sclk = golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * value;
	od_sclk /= 100;
	od_sclk += golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_GFXCLK_FMAX, od_sclk);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetSclkOD] failed to set od gfxclk!",
			return ret);

	/* retrieve updated gfxclk table */
	ret = vega20_setup_gfxclk_dpm_table(hwmgr);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetSclkOD] failed to refresh gfxclk table!",
			return ret);

	return 0;
}

static int vega20_get_mclk_od(
		struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = hwmgr->backend;
	struct vega20_single_dpm_table *mclk_table =
			&(data->dpm_table.mem_table);
	struct vega20_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mem_table);
	int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
	int golden_value = golden_mclk_table->dpm_levels
			[golden_mclk_table->count - 1].value;

	/* od percentage */
	value -= golden_value;
	value = DIV_ROUND_UP(value * 100, golden_value);

	return value;
}

static int vega20_set_mclk_od(
		struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct vega20_hwmgr *data = hwmgr->backend;
	struct vega20_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mem_table);
	uint32_t od_mclk;
	int ret = 0;

	od_mclk = golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * value;
	od_mclk /= 100;
	od_mclk += golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_UCLK_FMAX, od_mclk);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetMclkOD] failed to set od memclk!",
			return ret);

	/* retrieve updated memclk table */
	ret = vega20_setup_memclk_dpm_table(hwmgr);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetMclkOD] failed to refresh memclk table!",
			return ret);

	return 0;
}

static int vega20_populate_umdpstate_clocks(
		struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *gfx_table = &(data->dpm_table.gfx_table);
	struct vega20_single_dpm_table *mem_table = &(data->dpm_table.mem_table);

	hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
	hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;

	if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
	    mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) {
		hwmgr->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
		hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
	}

	hwmgr->pstate_sclk = hwmgr->pstate_sclk * 100;
	hwmgr->pstate_mclk = hwmgr->pstate_mclk * 100;

	return 0;
}

static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
		PP_Clock *clock, PPCLK_e clock_select)
{
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetDcModeMaxDpmFreq,
			(clock_select << 16),
			clock)) == 0,
			"[GetMaxSustainableClock] Failed to get max DC clock from SMC!",
			return ret);

	/* if DC limit is zero, return AC limit */
	if (*clock == 0) {
		PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetMaxDpmFreq,
			(clock_select << 16),
			clock)) == 0,
			"[GetMaxSustainableClock] failed to get max AC clock from SMC!",
			return ret);
	}

	return 0;
}
*max_sustainable_clocks = 1610 &(data->max_sustainable_clocks); 1611 int ret = 0; 1612 1613 max_sustainable_clocks->uclock = data->vbios_boot_state.mem_clock / 100; 1614 max_sustainable_clocks->soc_clock = data->vbios_boot_state.soc_clock / 100; 1615 max_sustainable_clocks->dcef_clock = data->vbios_boot_state.dcef_clock / 100; 1616 max_sustainable_clocks->display_clock = 0xFFFFFFFF; 1617 max_sustainable_clocks->phy_clock = 0xFFFFFFFF; 1618 max_sustainable_clocks->pixel_clock = 0xFFFFFFFF; 1619 1620 if (data->smu_features[GNLD_DPM_UCLK].enabled) 1621 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1622 &(max_sustainable_clocks->uclock), 1623 PPCLK_UCLK)) == 0, 1624 "[InitMaxSustainableClocks] failed to get max UCLK from SMC!", 1625 return ret); 1626 1627 if (data->smu_features[GNLD_DPM_SOCCLK].enabled) 1628 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1629 &(max_sustainable_clocks->soc_clock), 1630 PPCLK_SOCCLK)) == 0, 1631 "[InitMaxSustainableClocks] failed to get max SOCCLK from SMC!", 1632 return ret); 1633 1634 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { 1635 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1636 &(max_sustainable_clocks->dcef_clock), 1637 PPCLK_DCEFCLK)) == 0, 1638 "[InitMaxSustainableClocks] failed to get max DCEFCLK from SMC!", 1639 return ret); 1640 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1641 &(max_sustainable_clocks->display_clock), 1642 PPCLK_DISPCLK)) == 0, 1643 "[InitMaxSustainableClocks] failed to get max DISPCLK from SMC!", 1644 return ret); 1645 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1646 &(max_sustainable_clocks->phy_clock), 1647 PPCLK_PHYCLK)) == 0, 1648 "[InitMaxSustainableClocks] failed to get max PHYCLK from SMC!", 1649 return ret); 1650 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, 1651 &(max_sustainable_clocks->pixel_clock), 1652 PPCLK_PIXCLK)) == 0, 1653 "[InitMaxSustainableClocks] failed to get max PIXCLK from SMC!", 1654 return ret); 1655 } 1656 1657 if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock) 1658 max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock; 1659 1660 return 0; 1661 } 1662 1663 static int vega20_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr) 1664 { 1665 int result; 1666 1667 result = smum_send_msg_to_smc(hwmgr, 1668 PPSMC_MSG_SetMGpuFanBoostLimitRpm, 1669 NULL); 1670 PP_ASSERT_WITH_CODE(!result, 1671 "[EnableMgpuFan] Failed to enable mgpu fan boost!", 1672 return result); 1673 1674 return 0; 1675 } 1676 1677 static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr) 1678 { 1679 struct vega20_hwmgr *data = 1680 (struct vega20_hwmgr *)(hwmgr->backend); 1681 1682 data->uvd_power_gated = true; 1683 data->vce_power_gated = true; 1684 } 1685 1686 static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) 1687 { 1688 int result = 0; 1689 1690 smum_send_msg_to_smc_with_parameter(hwmgr, 1691 PPSMC_MSG_NumOfDisplays, 0, NULL); 1692 1693 result = vega20_set_allowed_featuresmask(hwmgr); 1694 PP_ASSERT_WITH_CODE(!result, 1695 "[EnableDPMTasks] Failed to set allowed featuresmask!\n", 1696 return result); 1697 1698 result = vega20_init_smc_table(hwmgr); 1699 PP_ASSERT_WITH_CODE(!result, 1700 "[EnableDPMTasks] Failed to initialize SMC table!", 1701 return result); 1702 1703 result = vega20_run_btc(hwmgr); 1704 PP_ASSERT_WITH_CODE(!result, 1705 "[EnableDPMTasks] Failed to run btc!", 1706 return result); 1707 1708 result = vega20_run_btc_afll(hwmgr); 1709 
PP_ASSERT_WITH_CODE(!result, 1710 "[EnableDPMTasks] Failed to run btc afll!", 1711 return result); 1712 1713 result = vega20_enable_all_smu_features(hwmgr); 1714 PP_ASSERT_WITH_CODE(!result, 1715 "[EnableDPMTasks] Failed to enable all smu features!", 1716 return result); 1717 1718 result = vega20_override_pcie_parameters(hwmgr); 1719 PP_ASSERT_WITH_CODE(!result, 1720 "[EnableDPMTasks] Failed to override pcie parameters!", 1721 return result); 1722 1723 result = vega20_notify_smc_display_change(hwmgr); 1724 PP_ASSERT_WITH_CODE(!result, 1725 "[EnableDPMTasks] Failed to notify smc display change!", 1726 return result); 1727 1728 result = vega20_send_clock_ratio(hwmgr); 1729 PP_ASSERT_WITH_CODE(!result, 1730 "[EnableDPMTasks] Failed to send clock ratio!", 1731 return result); 1732 1733 /* Initialize UVD/VCE powergating state */ 1734 vega20_init_powergate_state(hwmgr); 1735 1736 result = vega20_setup_default_dpm_tables(hwmgr); 1737 PP_ASSERT_WITH_CODE(!result, 1738 "[EnableDPMTasks] Failed to setup default DPM tables!", 1739 return result); 1740 1741 result = vega20_init_max_sustainable_clocks(hwmgr); 1742 PP_ASSERT_WITH_CODE(!result, 1743 "[EnableDPMTasks] Failed to get maximum sustainable clocks!", 1744 return result); 1745 1746 result = vega20_power_control_set_level(hwmgr); 1747 PP_ASSERT_WITH_CODE(!result, 1748 "[EnableDPMTasks] Failed to power control set level!", 1749 return result); 1750 1751 result = vega20_od8_initialize_default_settings(hwmgr); 1752 PP_ASSERT_WITH_CODE(!result, 1753 "[EnableDPMTasks] Failed to initialize odn settings!", 1754 return result); 1755 1756 result = vega20_populate_umdpstate_clocks(hwmgr); 1757 PP_ASSERT_WITH_CODE(!result, 1758 "[EnableDPMTasks] Failed to populate umdpstate clocks!", 1759 return result); 1760 1761 result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit, 1762 POWER_SOURCE_AC << 16, &hwmgr->default_power_limit); 1763 PP_ASSERT_WITH_CODE(!result, 1764 "[GetPptLimit] get default PPT limit failed!", 1765 return result); 1766 hwmgr->power_limit = 1767 hwmgr->default_power_limit; 1768 1769 return 0; 1770 } 1771 1772 static uint32_t vega20_find_lowest_dpm_level( 1773 struct vega20_single_dpm_table *table) 1774 { 1775 uint32_t i; 1776 1777 for (i = 0; i < table->count; i++) { 1778 if (table->dpm_levels[i].enabled) 1779 break; 1780 } 1781 if (i >= table->count) { 1782 i = 0; 1783 table->dpm_levels[i].enabled = true; 1784 } 1785 1786 return i; 1787 } 1788 1789 static uint32_t vega20_find_highest_dpm_level( 1790 struct vega20_single_dpm_table *table) 1791 { 1792 int i = 0; 1793 1794 PP_ASSERT_WITH_CODE(table != NULL, 1795 "[FindHighestDPMLevel] DPM Table does not exist!", 1796 return 0); 1797 PP_ASSERT_WITH_CODE(table->count > 0, 1798 "[FindHighestDPMLevel] DPM Table has no entry!", 1799 return 0); 1800 PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER, 1801 "[FindHighestDPMLevel] DPM Table has too many entries!", 1802 return MAX_REGULAR_DPM_NUMBER - 1); 1803 1804 for (i = table->count - 1; i >= 0; i--) { 1805 if (table->dpm_levels[i].enabled) 1806 break; 1807 } 1808 if (i < 0) { 1809 i = 0; 1810 table->dpm_levels[i].enabled = true; 1811 } 1812 1813 return i; 1814 } 1815 1816 static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask) 1817 { 1818 struct vega20_hwmgr *data = 1819 (struct vega20_hwmgr *)(hwmgr->backend); 1820 uint32_t min_freq; 1821 int ret = 0; 1822 1823 if (data->smu_features[GNLD_DPM_GFXCLK].enabled && 1824 (feature_mask & FEATURE_DPM_GFXCLK_MASK)) { 1825 min_freq = 
data->dpm_table.gfx_table.dpm_state.soft_min_level; 1826 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1827 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1828 (PPCLK_GFXCLK << 16) | (min_freq & 0xffff), 1829 NULL)), 1830 "Failed to set soft min gfxclk !", 1831 return ret); 1832 } 1833 1834 if (data->smu_features[GNLD_DPM_UCLK].enabled && 1835 (feature_mask & FEATURE_DPM_UCLK_MASK)) { 1836 min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level; 1837 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1838 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1839 (PPCLK_UCLK << 16) | (min_freq & 0xffff), 1840 NULL)), 1841 "Failed to set soft min memclk !", 1842 return ret); 1843 } 1844 1845 if (data->smu_features[GNLD_DPM_UVD].enabled && 1846 (feature_mask & FEATURE_DPM_UVD_MASK)) { 1847 min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level; 1848 1849 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1850 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1851 (PPCLK_VCLK << 16) | (min_freq & 0xffff), 1852 NULL)), 1853 "Failed to set soft min vclk!", 1854 return ret); 1855 1856 min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level; 1857 1858 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1859 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1860 (PPCLK_DCLK << 16) | (min_freq & 0xffff), 1861 NULL)), 1862 "Failed to set soft min dclk!", 1863 return ret); 1864 } 1865 1866 if (data->smu_features[GNLD_DPM_VCE].enabled && 1867 (feature_mask & FEATURE_DPM_VCE_MASK)) { 1868 min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level; 1869 1870 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1871 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1872 (PPCLK_ECLK << 16) | (min_freq & 0xffff), 1873 NULL)), 1874 "Failed to set soft min eclk!", 1875 return ret); 1876 } 1877 1878 if (data->smu_features[GNLD_DPM_SOCCLK].enabled && 1879 (feature_mask & FEATURE_DPM_SOCCLK_MASK)) { 1880 min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level; 1881 1882 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1883 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1884 (PPCLK_SOCCLK << 16) | (min_freq & 0xffff), 1885 NULL)), 1886 "Failed to set soft min socclk!", 1887 return ret); 1888 } 1889 1890 if (data->smu_features[GNLD_DPM_FCLK].enabled && 1891 (feature_mask & FEATURE_DPM_FCLK_MASK)) { 1892 min_freq = data->dpm_table.fclk_table.dpm_state.soft_min_level; 1893 1894 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1895 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1896 (PPCLK_FCLK << 16) | (min_freq & 0xffff), 1897 NULL)), 1898 "Failed to set soft min fclk!", 1899 return ret); 1900 } 1901 1902 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled && 1903 (feature_mask & FEATURE_DPM_DCEFCLK_MASK)) { 1904 min_freq = data->dpm_table.dcef_table.dpm_state.hard_min_level; 1905 1906 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1907 hwmgr, PPSMC_MSG_SetHardMinByFreq, 1908 (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff), 1909 NULL)), 1910 "Failed to set hard min dcefclk!", 1911 return ret); 1912 } 1913 1914 return ret; 1915 } 1916 1917 static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask) 1918 { 1919 struct vega20_hwmgr *data = 1920 (struct vega20_hwmgr *)(hwmgr->backend); 1921 uint32_t max_freq; 1922 int ret = 0; 1923 1924 if (data->smu_features[GNLD_DPM_GFXCLK].enabled && 1925 (feature_mask & FEATURE_DPM_GFXCLK_MASK)) { 1926 max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level; 1927 1928 PP_ASSERT_WITH_CODE(!(ret = 
smum_send_msg_to_smc_with_parameter( 1929 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1930 (PPCLK_GFXCLK << 16) | (max_freq & 0xffff), 1931 NULL)), 1932 "Failed to set soft max gfxclk!", 1933 return ret); 1934 } 1935 1936 if (data->smu_features[GNLD_DPM_UCLK].enabled && 1937 (feature_mask & FEATURE_DPM_UCLK_MASK)) { 1938 max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level; 1939 1940 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1941 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1942 (PPCLK_UCLK << 16) | (max_freq & 0xffff), 1943 NULL)), 1944 "Failed to set soft max memclk!", 1945 return ret); 1946 } 1947 1948 if (data->smu_features[GNLD_DPM_UVD].enabled && 1949 (feature_mask & FEATURE_DPM_UVD_MASK)) { 1950 max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level; 1951 1952 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1953 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1954 (PPCLK_VCLK << 16) | (max_freq & 0xffff), 1955 NULL)), 1956 "Failed to set soft max vclk!", 1957 return ret); 1958 1959 max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level; 1960 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1961 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1962 (PPCLK_DCLK << 16) | (max_freq & 0xffff), 1963 NULL)), 1964 "Failed to set soft max dclk!", 1965 return ret); 1966 } 1967 1968 if (data->smu_features[GNLD_DPM_VCE].enabled && 1969 (feature_mask & FEATURE_DPM_VCE_MASK)) { 1970 max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level; 1971 1972 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1973 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1974 (PPCLK_ECLK << 16) | (max_freq & 0xffff), 1975 NULL)), 1976 "Failed to set soft max eclk!", 1977 return ret); 1978 } 1979 1980 if (data->smu_features[GNLD_DPM_SOCCLK].enabled && 1981 (feature_mask & FEATURE_DPM_SOCCLK_MASK)) { 1982 max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level; 1983 1984 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1985 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1986 (PPCLK_SOCCLK << 16) | (max_freq & 0xffff), 1987 NULL)), 1988 "Failed to set soft max socclk!", 1989 return ret); 1990 } 1991 1992 if (data->smu_features[GNLD_DPM_FCLK].enabled && 1993 (feature_mask & FEATURE_DPM_FCLK_MASK)) { 1994 max_freq = data->dpm_table.fclk_table.dpm_state.soft_max_level; 1995 1996 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1997 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1998 (PPCLK_FCLK << 16) | (max_freq & 0xffff), 1999 NULL)), 2000 "Failed to set soft max fclk!", 2001 return ret); 2002 } 2003 2004 return ret; 2005 } 2006 2007 static int vega20_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) 2008 { 2009 struct vega20_hwmgr *data = 2010 (struct vega20_hwmgr *)(hwmgr->backend); 2011 int ret = 0; 2012 2013 if (data->smu_features[GNLD_DPM_VCE].supported) { 2014 if (data->smu_features[GNLD_DPM_VCE].enabled == enable) { 2015 if (enable) 2016 PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already enabled!\n"); 2017 else 2018 PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already disabled!\n"); 2019 } 2020 2021 ret = vega20_enable_smc_features(hwmgr, 2022 enable, 2023 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap); 2024 PP_ASSERT_WITH_CODE(!ret, 2025 "Attempt to Enable/Disable DPM VCE Failed!", 2026 return ret); 2027 data->smu_features[GNLD_DPM_VCE].enabled = enable; 2028 } 2029 2030 return 0; 2031 } 2032 2033 static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr, 2034 uint32_t *clock, 2035 PPCLK_e clock_select, 2036 bool max) 2037 { 2038 
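	/*
	 * Queries the SMC for one end of the DPM frequency range of the
	 * selected clock domain: PPSMC_MSG_GetMaxDpmFreq when 'max' is true,
	 * PPSMC_MSG_GetMinDpmFreq otherwise.  The clock id is passed in the
	 * upper 16 bits of the message argument, and callers such as
	 * vega20_dpm_get_sclk() multiply the result by 100, presumably to
	 * convert the SMC's MHz value into the 10 kHz units the powerplay
	 * interface reports.
	 */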
int ret; 2039 *clock = 0; 2040 2041 if (max) { 2042 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2043 PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16), 2044 clock)) == 0, 2045 "[GetClockRanges] Failed to get max clock from SMC!", 2046 return ret); 2047 } else { 2048 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2049 PPSMC_MSG_GetMinDpmFreq, 2050 (clock_select << 16), 2051 clock)) == 0, 2052 "[GetClockRanges] Failed to get min clock from SMC!", 2053 return ret); 2054 } 2055 2056 return 0; 2057 } 2058 2059 static uint32_t vega20_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) 2060 { 2061 struct vega20_hwmgr *data = 2062 (struct vega20_hwmgr *)(hwmgr->backend); 2063 uint32_t gfx_clk; 2064 int ret = 0; 2065 2066 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled, 2067 "[GetSclks]: gfxclk dpm not enabled!\n", 2068 return -EPERM); 2069 2070 if (low) { 2071 ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, false); 2072 PP_ASSERT_WITH_CODE(!ret, 2073 "[GetSclks]: fail to get min PPCLK_GFXCLK\n", 2074 return ret); 2075 } else { 2076 ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, true); 2077 PP_ASSERT_WITH_CODE(!ret, 2078 "[GetSclks]: fail to get max PPCLK_GFXCLK\n", 2079 return ret); 2080 } 2081 2082 return (gfx_clk * 100); 2083 } 2084 2085 static uint32_t vega20_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) 2086 { 2087 struct vega20_hwmgr *data = 2088 (struct vega20_hwmgr *)(hwmgr->backend); 2089 uint32_t mem_clk; 2090 int ret = 0; 2091 2092 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled, 2093 "[MemMclks]: memclk dpm not enabled!\n", 2094 return -EPERM); 2095 2096 if (low) { 2097 ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, false); 2098 PP_ASSERT_WITH_CODE(!ret, 2099 "[GetMclks]: fail to get min PPCLK_UCLK\n", 2100 return ret); 2101 } else { 2102 ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, true); 2103 PP_ASSERT_WITH_CODE(!ret, 2104 "[GetMclks]: fail to get max PPCLK_UCLK\n", 2105 return ret); 2106 } 2107 2108 return (mem_clk * 100); 2109 } 2110 2111 static int vega20_get_metrics_table(struct pp_hwmgr *hwmgr, 2112 SmuMetrics_t *metrics_table, 2113 bool bypass_cache) 2114 { 2115 struct vega20_hwmgr *data = 2116 (struct vega20_hwmgr *)(hwmgr->backend); 2117 int ret = 0; 2118 2119 if (bypass_cache || 2120 !data->metrics_time || 2121 time_after(jiffies, data->metrics_time + msecs_to_jiffies(1))) { 2122 ret = smum_smc_table_manager(hwmgr, 2123 (uint8_t *)(&data->metrics_table), 2124 TABLE_SMU_METRICS, 2125 true); 2126 if (ret) { 2127 pr_info("Failed to export SMU metrics table!\n"); 2128 return ret; 2129 } 2130 data->metrics_time = jiffies; 2131 } 2132 2133 if (metrics_table) 2134 memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t)); 2135 2136 return ret; 2137 } 2138 2139 static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr, 2140 uint32_t *query) 2141 { 2142 int ret = 0; 2143 SmuMetrics_t metrics_table; 2144 2145 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); 2146 if (ret) 2147 return ret; 2148 2149 /* For the 40.46 release, they changed the value name */ 2150 if (hwmgr->smu_version == 0x282e00) 2151 *query = metrics_table.AverageSocketPower << 8; 2152 else 2153 *query = metrics_table.CurrSocketPower << 8; 2154 2155 return ret; 2156 } 2157 2158 static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr, 2159 PPCLK_e clk_id, uint32_t *clk_freq) 2160 { 2161 int ret = 0; 2162 2163 *clk_freq = 0; 2164 2165 PP_ASSERT_WITH_CODE((ret = 
smum_send_msg_to_smc_with_parameter(hwmgr, 2166 PPSMC_MSG_GetDpmClockFreq, (clk_id << 16), 2167 clk_freq)) == 0, 2168 "[GetCurrentClkFreq] Attempt to get Current Frequency Failed!", 2169 return ret); 2170 2171 *clk_freq = *clk_freq * 100; 2172 2173 return 0; 2174 } 2175 2176 static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr, 2177 int idx, 2178 uint32_t *activity_percent) 2179 { 2180 int ret = 0; 2181 SmuMetrics_t metrics_table; 2182 2183 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); 2184 if (ret) 2185 return ret; 2186 2187 switch (idx) { 2188 case AMDGPU_PP_SENSOR_GPU_LOAD: 2189 *activity_percent = metrics_table.AverageGfxActivity; 2190 break; 2191 case AMDGPU_PP_SENSOR_MEM_LOAD: 2192 *activity_percent = metrics_table.AverageUclkActivity; 2193 break; 2194 default: 2195 pr_err("Invalid index for retrieving clock activity\n"); 2196 return -EINVAL; 2197 } 2198 2199 return ret; 2200 } 2201 2202 static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx, 2203 void *value, int *size) 2204 { 2205 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2206 struct amdgpu_device *adev = hwmgr->adev; 2207 SmuMetrics_t metrics_table; 2208 uint32_t val_vid; 2209 int ret = 0; 2210 2211 switch (idx) { 2212 case AMDGPU_PP_SENSOR_GFX_SCLK: 2213 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); 2214 if (ret) 2215 return ret; 2216 2217 *((uint32_t *)value) = metrics_table.AverageGfxclkFrequency * 100; 2218 *size = 4; 2219 break; 2220 case AMDGPU_PP_SENSOR_GFX_MCLK: 2221 ret = vega20_get_current_clk_freq(hwmgr, 2222 PPCLK_UCLK, 2223 (uint32_t *)value); 2224 if (!ret) 2225 *size = 4; 2226 break; 2227 case AMDGPU_PP_SENSOR_GPU_LOAD: 2228 case AMDGPU_PP_SENSOR_MEM_LOAD: 2229 ret = vega20_get_current_activity_percent(hwmgr, idx, (uint32_t *)value); 2230 if (!ret) 2231 *size = 4; 2232 break; 2233 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 2234 *((uint32_t *)value) = vega20_thermal_get_temperature(hwmgr); 2235 *size = 4; 2236 break; 2237 case AMDGPU_PP_SENSOR_EDGE_TEMP: 2238 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); 2239 if (ret) 2240 return ret; 2241 2242 *((uint32_t *)value) = metrics_table.TemperatureEdge * 2243 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 2244 *size = 4; 2245 break; 2246 case AMDGPU_PP_SENSOR_MEM_TEMP: 2247 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); 2248 if (ret) 2249 return ret; 2250 2251 *((uint32_t *)value) = metrics_table.TemperatureHBM * 2252 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 2253 *size = 4; 2254 break; 2255 case AMDGPU_PP_SENSOR_UVD_POWER: 2256 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1; 2257 *size = 4; 2258 break; 2259 case AMDGPU_PP_SENSOR_VCE_POWER: 2260 *((uint32_t *)value) = data->vce_power_gated ? 
0 : 1; 2261 *size = 4; 2262 break; 2263 case AMDGPU_PP_SENSOR_GPU_POWER: 2264 *size = 16; 2265 ret = vega20_get_gpu_power(hwmgr, (uint32_t *)value); 2266 break; 2267 case AMDGPU_PP_SENSOR_VDDGFX: 2268 val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) & 2269 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >> 2270 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT; 2271 *((uint32_t *)value) = 2272 (uint32_t)convert_to_vddc((uint8_t)val_vid); 2273 break; 2274 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: 2275 ret = vega20_get_enabled_smc_features(hwmgr, (uint64_t *)value); 2276 if (!ret) 2277 *size = 8; 2278 break; 2279 default: 2280 ret = -EOPNOTSUPP; 2281 break; 2282 } 2283 return ret; 2284 } 2285 2286 static int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr, 2287 struct pp_display_clock_request *clock_req) 2288 { 2289 int result = 0; 2290 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2291 enum amd_pp_clock_type clk_type = clock_req->clock_type; 2292 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; 2293 PPCLK_e clk_select = 0; 2294 uint32_t clk_request = 0; 2295 2296 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { 2297 switch (clk_type) { 2298 case amd_pp_dcef_clock: 2299 clk_select = PPCLK_DCEFCLK; 2300 break; 2301 case amd_pp_disp_clock: 2302 clk_select = PPCLK_DISPCLK; 2303 break; 2304 case amd_pp_pixel_clock: 2305 clk_select = PPCLK_PIXCLK; 2306 break; 2307 case amd_pp_phy_clock: 2308 clk_select = PPCLK_PHYCLK; 2309 break; 2310 default: 2311 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!"); 2312 result = -EINVAL; 2313 break; 2314 } 2315 2316 if (!result) { 2317 clk_request = (clk_select << 16) | clk_freq; 2318 result = smum_send_msg_to_smc_with_parameter(hwmgr, 2319 PPSMC_MSG_SetHardMinByFreq, 2320 clk_request, 2321 NULL); 2322 } 2323 } 2324 2325 return result; 2326 } 2327 2328 static int vega20_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, 2329 PHM_PerformanceLevelDesignation designation, uint32_t index, 2330 PHM_PerformanceLevel *level) 2331 { 2332 return 0; 2333 } 2334 2335 static int vega20_notify_smc_display_config_after_ps_adjustment( 2336 struct pp_hwmgr *hwmgr) 2337 { 2338 struct vega20_hwmgr *data = 2339 (struct vega20_hwmgr *)(hwmgr->backend); 2340 struct vega20_single_dpm_table *dpm_table = 2341 &data->dpm_table.mem_table; 2342 struct PP_Clocks min_clocks = {0}; 2343 struct pp_display_clock_request clock_req; 2344 int ret = 0; 2345 2346 min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk; 2347 min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk; 2348 min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; 2349 2350 if (data->smu_features[GNLD_DPM_DCEFCLK].supported) { 2351 clock_req.clock_type = amd_pp_dcef_clock; 2352 clock_req.clock_freq_in_khz = min_clocks.dcefClock * 10; 2353 if (!vega20_display_clock_voltage_request(hwmgr, &clock_req)) { 2354 if (data->smu_features[GNLD_DS_DCEFCLK].supported) 2355 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter( 2356 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, 2357 min_clocks.dcefClockInSR / 100, 2358 NULL)) == 0, 2359 "Attempt to set divider for DCEFCLK Failed!", 2360 return ret); 2361 } else { 2362 pr_info("Attempt to set Hard Min for DCEFCLK Failed!"); 2363 } 2364 } 2365 2366 if (data->smu_features[GNLD_DPM_UCLK].enabled) { 2367 dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100; 2368 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2369 
PPSMC_MSG_SetHardMinByFreq, 2370 (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level, 2371 NULL)), 2372 "[SetHardMinFreq] Set hard min uclk failed!", 2373 return ret); 2374 } 2375 2376 return 0; 2377 } 2378 2379 static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr) 2380 { 2381 struct vega20_hwmgr *data = 2382 (struct vega20_hwmgr *)(hwmgr->backend); 2383 uint32_t soft_level; 2384 int ret = 0; 2385 2386 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table)); 2387 2388 data->dpm_table.gfx_table.dpm_state.soft_min_level = 2389 data->dpm_table.gfx_table.dpm_state.soft_max_level = 2390 data->dpm_table.gfx_table.dpm_levels[soft_level].value; 2391 2392 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table)); 2393 2394 data->dpm_table.mem_table.dpm_state.soft_min_level = 2395 data->dpm_table.mem_table.dpm_state.soft_max_level = 2396 data->dpm_table.mem_table.dpm_levels[soft_level].value; 2397 2398 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.soc_table)); 2399 2400 data->dpm_table.soc_table.dpm_state.soft_min_level = 2401 data->dpm_table.soc_table.dpm_state.soft_max_level = 2402 data->dpm_table.soc_table.dpm_levels[soft_level].value; 2403 2404 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2405 FEATURE_DPM_UCLK_MASK | 2406 FEATURE_DPM_SOCCLK_MASK); 2407 PP_ASSERT_WITH_CODE(!ret, 2408 "Failed to upload boot level to highest!", 2409 return ret); 2410 2411 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2412 FEATURE_DPM_UCLK_MASK | 2413 FEATURE_DPM_SOCCLK_MASK); 2414 PP_ASSERT_WITH_CODE(!ret, 2415 "Failed to upload dpm max level to highest!", 2416 return ret); 2417 2418 return 0; 2419 } 2420 2421 static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr) 2422 { 2423 struct vega20_hwmgr *data = 2424 (struct vega20_hwmgr *)(hwmgr->backend); 2425 uint32_t soft_level; 2426 int ret = 0; 2427 2428 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); 2429 2430 data->dpm_table.gfx_table.dpm_state.soft_min_level = 2431 data->dpm_table.gfx_table.dpm_state.soft_max_level = 2432 data->dpm_table.gfx_table.dpm_levels[soft_level].value; 2433 2434 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table)); 2435 2436 data->dpm_table.mem_table.dpm_state.soft_min_level = 2437 data->dpm_table.mem_table.dpm_state.soft_max_level = 2438 data->dpm_table.mem_table.dpm_levels[soft_level].value; 2439 2440 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table)); 2441 2442 data->dpm_table.soc_table.dpm_state.soft_min_level = 2443 data->dpm_table.soc_table.dpm_state.soft_max_level = 2444 data->dpm_table.soc_table.dpm_levels[soft_level].value; 2445 2446 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2447 FEATURE_DPM_UCLK_MASK | 2448 FEATURE_DPM_SOCCLK_MASK); 2449 PP_ASSERT_WITH_CODE(!ret, 2450 "Failed to upload boot level to lowest!", 2451 return ret); 2452 2453 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2454 FEATURE_DPM_UCLK_MASK | 2455 FEATURE_DPM_SOCCLK_MASK); 2456 PP_ASSERT_WITH_CODE(!ret, 2457 "Failed to upload dpm max level to lowest!", 2458 return ret); 2459 2460 return 0; 2461 2462 } 2463 2464 static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr) 2465 { 2466 struct vega20_hwmgr *data = 2467 (struct vega20_hwmgr *)(hwmgr->backend); 2468 uint32_t soft_min_level, soft_max_level; 2469 int ret = 0; 2470 2471 /* gfxclk soft min/max settings */ 2472 soft_min_level = 2473 vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
2474 soft_max_level = 2475 vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table)); 2476 2477 data->dpm_table.gfx_table.dpm_state.soft_min_level = 2478 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; 2479 data->dpm_table.gfx_table.dpm_state.soft_max_level = 2480 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value; 2481 2482 /* uclk soft min/max settings */ 2483 soft_min_level = 2484 vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table)); 2485 soft_max_level = 2486 vega20_find_highest_dpm_level(&(data->dpm_table.mem_table)); 2487 2488 data->dpm_table.mem_table.dpm_state.soft_min_level = 2489 data->dpm_table.mem_table.dpm_levels[soft_min_level].value; 2490 data->dpm_table.mem_table.dpm_state.soft_max_level = 2491 data->dpm_table.mem_table.dpm_levels[soft_max_level].value; 2492 2493 /* socclk soft min/max settings */ 2494 soft_min_level = 2495 vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table)); 2496 soft_max_level = 2497 vega20_find_highest_dpm_level(&(data->dpm_table.soc_table)); 2498 2499 data->dpm_table.soc_table.dpm_state.soft_min_level = 2500 data->dpm_table.soc_table.dpm_levels[soft_min_level].value; 2501 data->dpm_table.soc_table.dpm_state.soft_max_level = 2502 data->dpm_table.soc_table.dpm_levels[soft_max_level].value; 2503 2504 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2505 FEATURE_DPM_UCLK_MASK | 2506 FEATURE_DPM_SOCCLK_MASK); 2507 PP_ASSERT_WITH_CODE(!ret, 2508 "Failed to upload DPM Bootup Levels!", 2509 return ret); 2510 2511 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | 2512 FEATURE_DPM_UCLK_MASK | 2513 FEATURE_DPM_SOCCLK_MASK); 2514 PP_ASSERT_WITH_CODE(!ret, 2515 "Failed to upload DPM Max Levels!", 2516 return ret); 2517 2518 return 0; 2519 } 2520 2521 static int vega20_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level, 2522 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask) 2523 { 2524 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2525 struct vega20_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table); 2526 struct vega20_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table); 2527 struct vega20_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table); 2528 2529 *sclk_mask = 0; 2530 *mclk_mask = 0; 2531 *soc_mask = 0; 2532 2533 if (gfx_dpm_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL && 2534 mem_dpm_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL && 2535 soc_dpm_table->count > VEGA20_UMD_PSTATE_SOCCLK_LEVEL) { 2536 *sclk_mask = VEGA20_UMD_PSTATE_GFXCLK_LEVEL; 2537 *mclk_mask = VEGA20_UMD_PSTATE_MCLK_LEVEL; 2538 *soc_mask = VEGA20_UMD_PSTATE_SOCCLK_LEVEL; 2539 } 2540 2541 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { 2542 *sclk_mask = 0; 2543 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { 2544 *mclk_mask = 0; 2545 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 2546 *sclk_mask = gfx_dpm_table->count - 1; 2547 *mclk_mask = mem_dpm_table->count - 1; 2548 *soc_mask = soc_dpm_table->count - 1; 2549 } 2550 2551 return 0; 2552 } 2553 2554 static int vega20_force_clock_level(struct pp_hwmgr *hwmgr, 2555 enum pp_clock_type type, uint32_t mask) 2556 { 2557 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2558 uint32_t soft_min_level, soft_max_level, hard_min_level; 2559 int ret = 0; 2560 2561 switch (type) { 2562 case PP_SCLK: 2563 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2564 soft_max_level = mask ? 
(fls(mask) - 1) : 0; 2565 2566 if (soft_max_level >= data->dpm_table.gfx_table.count) { 2567 pr_err("Clock level specified %d is over max allowed %d\n", 2568 soft_max_level, 2569 data->dpm_table.gfx_table.count - 1); 2570 return -EINVAL; 2571 } 2572 2573 data->dpm_table.gfx_table.dpm_state.soft_min_level = 2574 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; 2575 data->dpm_table.gfx_table.dpm_state.soft_max_level = 2576 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value; 2577 2578 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK); 2579 PP_ASSERT_WITH_CODE(!ret, 2580 "Failed to upload boot level to lowest!", 2581 return ret); 2582 2583 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK); 2584 PP_ASSERT_WITH_CODE(!ret, 2585 "Failed to upload dpm max level to highest!", 2586 return ret); 2587 break; 2588 2589 case PP_MCLK: 2590 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2591 soft_max_level = mask ? (fls(mask) - 1) : 0; 2592 2593 if (soft_max_level >= data->dpm_table.mem_table.count) { 2594 pr_err("Clock level specified %d is over max allowed %d\n", 2595 soft_max_level, 2596 data->dpm_table.mem_table.count - 1); 2597 return -EINVAL; 2598 } 2599 2600 data->dpm_table.mem_table.dpm_state.soft_min_level = 2601 data->dpm_table.mem_table.dpm_levels[soft_min_level].value; 2602 data->dpm_table.mem_table.dpm_state.soft_max_level = 2603 data->dpm_table.mem_table.dpm_levels[soft_max_level].value; 2604 2605 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_UCLK_MASK); 2606 PP_ASSERT_WITH_CODE(!ret, 2607 "Failed to upload boot level to lowest!", 2608 return ret); 2609 2610 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK); 2611 PP_ASSERT_WITH_CODE(!ret, 2612 "Failed to upload dpm max level to highest!", 2613 return ret); 2614 2615 break; 2616 2617 case PP_SOCCLK: 2618 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2619 soft_max_level = mask ? (fls(mask) - 1) : 0; 2620 2621 if (soft_max_level >= data->dpm_table.soc_table.count) { 2622 pr_err("Clock level specified %d is over max allowed %d\n", 2623 soft_max_level, 2624 data->dpm_table.soc_table.count - 1); 2625 return -EINVAL; 2626 } 2627 2628 data->dpm_table.soc_table.dpm_state.soft_min_level = 2629 data->dpm_table.soc_table.dpm_levels[soft_min_level].value; 2630 data->dpm_table.soc_table.dpm_state.soft_max_level = 2631 data->dpm_table.soc_table.dpm_levels[soft_max_level].value; 2632 2633 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_SOCCLK_MASK); 2634 PP_ASSERT_WITH_CODE(!ret, 2635 "Failed to upload boot level to lowest!", 2636 return ret); 2637 2638 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_SOCCLK_MASK); 2639 PP_ASSERT_WITH_CODE(!ret, 2640 "Failed to upload dpm max level to highest!", 2641 return ret); 2642 2643 break; 2644 2645 case PP_FCLK: 2646 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2647 soft_max_level = mask ? 
(fls(mask) - 1) : 0; 2648 2649 if (soft_max_level >= data->dpm_table.fclk_table.count) { 2650 pr_err("Clock level specified %d is over max allowed %d\n", 2651 soft_max_level, 2652 data->dpm_table.fclk_table.count - 1); 2653 return -EINVAL; 2654 } 2655 2656 data->dpm_table.fclk_table.dpm_state.soft_min_level = 2657 data->dpm_table.fclk_table.dpm_levels[soft_min_level].value; 2658 data->dpm_table.fclk_table.dpm_state.soft_max_level = 2659 data->dpm_table.fclk_table.dpm_levels[soft_max_level].value; 2660 2661 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_FCLK_MASK); 2662 PP_ASSERT_WITH_CODE(!ret, 2663 "Failed to upload boot level to lowest!", 2664 return ret); 2665 2666 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_FCLK_MASK); 2667 PP_ASSERT_WITH_CODE(!ret, 2668 "Failed to upload dpm max level to highest!", 2669 return ret); 2670 2671 break; 2672 2673 case PP_DCEFCLK: 2674 hard_min_level = mask ? (ffs(mask) - 1) : 0; 2675 2676 if (hard_min_level >= data->dpm_table.dcef_table.count) { 2677 pr_err("Clock level specified %d is over max allowed %d\n", 2678 hard_min_level, 2679 data->dpm_table.dcef_table.count - 1); 2680 return -EINVAL; 2681 } 2682 2683 data->dpm_table.dcef_table.dpm_state.hard_min_level = 2684 data->dpm_table.dcef_table.dpm_levels[hard_min_level].value; 2685 2686 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_DCEFCLK_MASK); 2687 PP_ASSERT_WITH_CODE(!ret, 2688 "Failed to upload boot level to lowest!", 2689 return ret); 2690 2691 //TODO: Setting DCEFCLK max dpm level is not supported 2692 2693 break; 2694 2695 case PP_PCIE: 2696 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2697 soft_max_level = mask ? (fls(mask) - 1) : 0; 2698 if (soft_min_level >= NUM_LINK_LEVELS || 2699 soft_max_level >= NUM_LINK_LEVELS) 2700 return -EINVAL; 2701 2702 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2703 PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level, 2704 NULL); 2705 PP_ASSERT_WITH_CODE(!ret, 2706 "Failed to set min link dpm level!", 2707 return ret); 2708 2709 break; 2710 2711 default: 2712 break; 2713 } 2714 2715 return 0; 2716 } 2717 2718 static int vega20_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, 2719 enum amd_dpm_forced_level level) 2720 { 2721 int ret = 0; 2722 uint32_t sclk_mask, mclk_mask, soc_mask; 2723 2724 switch (level) { 2725 case AMD_DPM_FORCED_LEVEL_HIGH: 2726 ret = vega20_force_dpm_highest(hwmgr); 2727 break; 2728 2729 case AMD_DPM_FORCED_LEVEL_LOW: 2730 ret = vega20_force_dpm_lowest(hwmgr); 2731 break; 2732 2733 case AMD_DPM_FORCED_LEVEL_AUTO: 2734 ret = vega20_unforce_dpm_levels(hwmgr); 2735 break; 2736 2737 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 2738 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 2739 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 2740 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 2741 ret = vega20_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask); 2742 if (ret) 2743 return ret; 2744 vega20_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask); 2745 vega20_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask); 2746 vega20_force_clock_level(hwmgr, PP_SOCCLK, 1 << soc_mask); 2747 break; 2748 2749 case AMD_DPM_FORCED_LEVEL_MANUAL: 2750 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 2751 default: 2752 break; 2753 } 2754 2755 return ret; 2756 } 2757 2758 static uint32_t vega20_get_fan_control_mode(struct pp_hwmgr *hwmgr) 2759 { 2760 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2761 2762 if (data->smu_features[GNLD_FAN_CONTROL].enabled == false) 2763 return AMD_FAN_CTRL_MANUAL; 2764 else 2765 return AMD_FAN_CTRL_AUTO; 
2766 } 2767 2768 static void vega20_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) 2769 { 2770 switch (mode) { 2771 case AMD_FAN_CTRL_NONE: 2772 vega20_fan_ctrl_set_fan_speed_pwm(hwmgr, 255); 2773 break; 2774 case AMD_FAN_CTRL_MANUAL: 2775 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) 2776 vega20_fan_ctrl_stop_smc_fan_control(hwmgr); 2777 break; 2778 case AMD_FAN_CTRL_AUTO: 2779 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) 2780 vega20_fan_ctrl_start_smc_fan_control(hwmgr); 2781 break; 2782 default: 2783 break; 2784 } 2785 } 2786 2787 static int vega20_get_dal_power_level(struct pp_hwmgr *hwmgr, 2788 struct amd_pp_simple_clock_info *info) 2789 { 2790 #if 0 2791 struct phm_ppt_v2_information *table_info = 2792 (struct phm_ppt_v2_information *)hwmgr->pptable; 2793 struct phm_clock_and_voltage_limits *max_limits = 2794 &table_info->max_clock_voltage_on_ac; 2795 2796 info->engine_max_clock = max_limits->sclk; 2797 info->memory_max_clock = max_limits->mclk; 2798 #endif 2799 return 0; 2800 } 2801 2802 2803 static int vega20_get_sclks(struct pp_hwmgr *hwmgr, 2804 struct pp_clock_levels_with_latency *clocks) 2805 { 2806 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2807 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table); 2808 int i, count; 2809 2810 if (!data->smu_features[GNLD_DPM_GFXCLK].enabled) 2811 return -1; 2812 2813 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; 2814 clocks->num_levels = count; 2815 2816 for (i = 0; i < count; i++) { 2817 clocks->data[i].clocks_in_khz = 2818 dpm_table->dpm_levels[i].value * 1000; 2819 clocks->data[i].latency_in_us = 0; 2820 } 2821 2822 return 0; 2823 } 2824 2825 static uint32_t vega20_get_mem_latency(struct pp_hwmgr *hwmgr, 2826 uint32_t clock) 2827 { 2828 return 25; 2829 } 2830 2831 static int vega20_get_memclocks(struct pp_hwmgr *hwmgr, 2832 struct pp_clock_levels_with_latency *clocks) 2833 { 2834 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2835 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.mem_table); 2836 int i, count; 2837 2838 if (!data->smu_features[GNLD_DPM_UCLK].enabled) 2839 return -1; 2840 2841 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; 2842 clocks->num_levels = data->mclk_latency_table.count = count; 2843 2844 for (i = 0; i < count; i++) { 2845 clocks->data[i].clocks_in_khz = 2846 data->mclk_latency_table.entries[i].frequency = 2847 dpm_table->dpm_levels[i].value * 1000; 2848 clocks->data[i].latency_in_us = 2849 data->mclk_latency_table.entries[i].latency = 2850 vega20_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value); 2851 } 2852 2853 return 0; 2854 } 2855 2856 static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr, 2857 struct pp_clock_levels_with_latency *clocks) 2858 { 2859 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2860 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.dcef_table); 2861 int i, count; 2862 2863 if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled) 2864 return -1; 2865 2866 count = (dpm_table->count > MAX_NUM_CLOCKS) ? 
MAX_NUM_CLOCKS : dpm_table->count; 2867 clocks->num_levels = count; 2868 2869 for (i = 0; i < count; i++) { 2870 clocks->data[i].clocks_in_khz = 2871 dpm_table->dpm_levels[i].value * 1000; 2872 clocks->data[i].latency_in_us = 0; 2873 } 2874 2875 return 0; 2876 } 2877 2878 static int vega20_get_socclocks(struct pp_hwmgr *hwmgr, 2879 struct pp_clock_levels_with_latency *clocks) 2880 { 2881 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2882 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.soc_table); 2883 int i, count; 2884 2885 if (!data->smu_features[GNLD_DPM_SOCCLK].enabled) 2886 return -1; 2887 2888 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; 2889 clocks->num_levels = count; 2890 2891 for (i = 0; i < count; i++) { 2892 clocks->data[i].clocks_in_khz = 2893 dpm_table->dpm_levels[i].value * 1000; 2894 clocks->data[i].latency_in_us = 0; 2895 } 2896 2897 return 0; 2898 2899 } 2900 2901 static int vega20_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, 2902 enum amd_pp_clock_type type, 2903 struct pp_clock_levels_with_latency *clocks) 2904 { 2905 int ret; 2906 2907 switch (type) { 2908 case amd_pp_sys_clock: 2909 ret = vega20_get_sclks(hwmgr, clocks); 2910 break; 2911 case amd_pp_mem_clock: 2912 ret = vega20_get_memclocks(hwmgr, clocks); 2913 break; 2914 case amd_pp_dcef_clock: 2915 ret = vega20_get_dcefclocks(hwmgr, clocks); 2916 break; 2917 case amd_pp_soc_clock: 2918 ret = vega20_get_socclocks(hwmgr, clocks); 2919 break; 2920 default: 2921 return -EINVAL; 2922 } 2923 2924 return ret; 2925 } 2926 2927 static int vega20_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, 2928 enum amd_pp_clock_type type, 2929 struct pp_clock_levels_with_voltage *clocks) 2930 { 2931 clocks->num_levels = 0; 2932 2933 return 0; 2934 } 2935 2936 static int vega20_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, 2937 void *clock_ranges) 2938 { 2939 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 2940 Watermarks_t *table = &(data->smc_state_table.water_marks_table); 2941 struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges; 2942 2943 if (!data->registry_data.disable_water_mark && 2944 data->smu_features[GNLD_DPM_DCEFCLK].supported && 2945 data->smu_features[GNLD_DPM_SOCCLK].supported) { 2946 smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges); 2947 data->water_marks_bitmap |= WaterMarksExist; 2948 data->water_marks_bitmap &= ~WaterMarksLoaded; 2949 } 2950 2951 return 0; 2952 } 2953 2954 static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, 2955 enum PP_OD_DPM_TABLE_COMMAND type, 2956 long *input, uint32_t size) 2957 { 2958 struct vega20_hwmgr *data = 2959 (struct vega20_hwmgr *)(hwmgr->backend); 2960 struct vega20_od8_single_setting *od8_settings = 2961 data->od8_settings.od8_settings_array; 2962 OverDriveTable_t *od_table = 2963 &(data->smc_state_table.overdrive_table); 2964 int32_t input_clk, input_vol, i; 2965 uint32_t input_index; 2966 int od8_id; 2967 int ret; 2968 2969 PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage", 2970 return -EINVAL); 2971 2972 switch (type) { 2973 case PP_OD_EDIT_SCLK_VDDC_TABLE: 2974 if (!(od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && 2975 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id)) { 2976 pr_info("Sclk min/max frequency overdrive not supported\n"); 2977 return -EOPNOTSUPP; 2978 } 2979 2980 for (i = 0; i < size; i += 2) { 2981 if (i + 2 > size) { 2982 pr_info("invalid number of input parameters 
%d\n", 2983 size); 2984 return -EINVAL; 2985 } 2986 2987 input_index = input[i]; 2988 input_clk = input[i + 1]; 2989 2990 if (input_index != 0 && input_index != 1) { 2991 pr_info("Invalid index %d\n", input_index); 2992 pr_info("Support min/max sclk frequency setting only which index by 0/1\n"); 2993 return -EINVAL; 2994 } 2995 2996 if (input_clk < od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value || 2997 input_clk > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value) { 2998 pr_info("clock freq %d is not within allowed range [%d - %d]\n", 2999 input_clk, 3000 od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value, 3001 od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value); 3002 return -EINVAL; 3003 } 3004 3005 if ((input_index == 0 && od_table->GfxclkFmin != input_clk) || 3006 (input_index == 1 && od_table->GfxclkFmax != input_clk)) 3007 data->gfxclk_overdrive = true; 3008 3009 if (input_index == 0) 3010 od_table->GfxclkFmin = input_clk; 3011 else 3012 od_table->GfxclkFmax = input_clk; 3013 } 3014 3015 break; 3016 3017 case PP_OD_EDIT_MCLK_VDDC_TABLE: 3018 if (!od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { 3019 pr_info("Mclk max frequency overdrive not supported\n"); 3020 return -EOPNOTSUPP; 3021 } 3022 3023 for (i = 0; i < size; i += 2) { 3024 if (i + 2 > size) { 3025 pr_info("invalid number of input parameters %d\n", 3026 size); 3027 return -EINVAL; 3028 } 3029 3030 input_index = input[i]; 3031 input_clk = input[i + 1]; 3032 3033 if (input_index != 1) { 3034 pr_info("Invalid index %d\n", input_index); 3035 pr_info("Support max Mclk frequency setting only which index by 1\n"); 3036 return -EINVAL; 3037 } 3038 3039 if (input_clk < od8_settings[OD8_SETTING_UCLK_FMAX].min_value || 3040 input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) { 3041 pr_info("clock freq %d is not within allowed range [%d - %d]\n", 3042 input_clk, 3043 od8_settings[OD8_SETTING_UCLK_FMAX].min_value, 3044 od8_settings[OD8_SETTING_UCLK_FMAX].max_value); 3045 return -EINVAL; 3046 } 3047 3048 if (input_index == 1 && od_table->UclkFmax != input_clk) 3049 data->memclk_overdrive = true; 3050 3051 od_table->UclkFmax = input_clk; 3052 } 3053 3054 break; 3055 3056 case PP_OD_EDIT_VDDC_CURVE: 3057 if (!(od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id && 3058 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id && 3059 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id && 3060 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && 3061 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && 3062 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id)) { 3063 pr_info("Voltage curve calibrate not supported\n"); 3064 return -EOPNOTSUPP; 3065 } 3066 3067 for (i = 0; i < size; i += 3) { 3068 if (i + 3 > size) { 3069 pr_info("invalid number of input parameters %d\n", 3070 size); 3071 return -EINVAL; 3072 } 3073 3074 input_index = input[i]; 3075 input_clk = input[i + 1]; 3076 input_vol = input[i + 2]; 3077 3078 if (input_index > 2) { 3079 pr_info("Setting for point %d is not supported\n", 3080 input_index + 1); 3081 pr_info("Three supported points index by 0, 1, 2\n"); 3082 return -EINVAL; 3083 } 3084 3085 od8_id = OD8_SETTING_GFXCLK_FREQ1 + 2 * input_index; 3086 if (input_clk < od8_settings[od8_id].min_value || 3087 input_clk > od8_settings[od8_id].max_value) { 3088 pr_info("clock freq %d is not within allowed range [%d - %d]\n", 3089 input_clk, 3090 od8_settings[od8_id].min_value, 3091 od8_settings[od8_id].max_value); 3092 return -EINVAL; 3093 } 3094 3095 od8_id = OD8_SETTING_GFXCLK_VOLTAGE1 + 2 * input_index; 3096 if (input_vol < 
od8_settings[od8_id].min_value || 3097 input_vol > od8_settings[od8_id].max_value) { 3098 pr_info("clock voltage %d is not within allowed range [%d - %d]\n", 3099 input_vol, 3100 od8_settings[od8_id].min_value, 3101 od8_settings[od8_id].max_value); 3102 return -EINVAL; 3103 } 3104 3105 switch (input_index) { 3106 case 0: 3107 od_table->GfxclkFreq1 = input_clk; 3108 od_table->GfxclkVolt1 = input_vol * VOLTAGE_SCALE; 3109 break; 3110 case 1: 3111 od_table->GfxclkFreq2 = input_clk; 3112 od_table->GfxclkVolt2 = input_vol * VOLTAGE_SCALE; 3113 break; 3114 case 2: 3115 od_table->GfxclkFreq3 = input_clk; 3116 od_table->GfxclkVolt3 = input_vol * VOLTAGE_SCALE; 3117 break; 3118 } 3119 } 3120 break; 3121 3122 case PP_OD_RESTORE_DEFAULT_TABLE: 3123 data->gfxclk_overdrive = false; 3124 data->memclk_overdrive = false; 3125 3126 ret = smum_smc_table_manager(hwmgr, 3127 (uint8_t *)od_table, 3128 TABLE_OVERDRIVE, true); 3129 PP_ASSERT_WITH_CODE(!ret, 3130 "Failed to export overdrive table!", 3131 return ret); 3132 break; 3133 3134 case PP_OD_COMMIT_DPM_TABLE: 3135 ret = smum_smc_table_manager(hwmgr, 3136 (uint8_t *)od_table, 3137 TABLE_OVERDRIVE, false); 3138 PP_ASSERT_WITH_CODE(!ret, 3139 "Failed to import overdrive table!", 3140 return ret); 3141 3142 /* retrieve updated gfxclk table */ 3143 if (data->gfxclk_overdrive) { 3144 data->gfxclk_overdrive = false; 3145 3146 ret = vega20_setup_gfxclk_dpm_table(hwmgr); 3147 if (ret) 3148 return ret; 3149 } 3150 3151 /* retrieve updated memclk table */ 3152 if (data->memclk_overdrive) { 3153 data->memclk_overdrive = false; 3154 3155 ret = vega20_setup_memclk_dpm_table(hwmgr); 3156 if (ret) 3157 return ret; 3158 } 3159 break; 3160 3161 default: 3162 return -EINVAL; 3163 } 3164 3165 return 0; 3166 } 3167 3168 static int vega20_set_mp1_state(struct pp_hwmgr *hwmgr, 3169 enum pp_mp1_state mp1_state) 3170 { 3171 uint16_t msg; 3172 int ret; 3173 3174 switch (mp1_state) { 3175 case PP_MP1_STATE_SHUTDOWN: 3176 msg = PPSMC_MSG_PrepareMp1ForShutdown; 3177 break; 3178 case PP_MP1_STATE_UNLOAD: 3179 msg = PPSMC_MSG_PrepareMp1ForUnload; 3180 break; 3181 case PP_MP1_STATE_RESET: 3182 msg = PPSMC_MSG_PrepareMp1ForReset; 3183 break; 3184 case PP_MP1_STATE_NONE: 3185 default: 3186 return 0; 3187 } 3188 3189 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0, 3190 "[PrepareMp1] Failed!", 3191 return ret); 3192 3193 return 0; 3194 } 3195 3196 static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) 3197 { 3198 static const char *ppfeature_name[] = { 3199 "DPM_PREFETCHER", 3200 "GFXCLK_DPM", 3201 "UCLK_DPM", 3202 "SOCCLK_DPM", 3203 "UVD_DPM", 3204 "VCE_DPM", 3205 "ULV", 3206 "MP0CLK_DPM", 3207 "LINK_DPM", 3208 "DCEFCLK_DPM", 3209 "GFXCLK_DS", 3210 "SOCCLK_DS", 3211 "LCLK_DS", 3212 "PPT", 3213 "TDC", 3214 "THERMAL", 3215 "GFX_PER_CU_CG", 3216 "RM", 3217 "DCEFCLK_DS", 3218 "ACDC", 3219 "VR0HOT", 3220 "VR1HOT", 3221 "FW_CTF", 3222 "LED_DISPLAY", 3223 "FAN_CONTROL", 3224 "GFX_EDC", 3225 "GFXOFF", 3226 "CG", 3227 "FCLK_DPM", 3228 "FCLK_DS", 3229 "MP1CLK_DS", 3230 "MP0CLK_DS", 3231 "XGMI", 3232 "ECC"}; 3233 static const char *output_title[] = { 3234 "FEATURES", 3235 "BITMASK", 3236 "ENABLEMENT"}; 3237 uint64_t features_enabled; 3238 int i; 3239 int ret = 0; 3240 int size = 0; 3241 3242 phm_get_sysfs_buf(&buf, &size); 3243 3244 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); 3245 PP_ASSERT_WITH_CODE(!ret, 3246 "[EnableAllSmuFeatures] Failed to get enabled smc features!", 3247 return ret); 3248 3249 size += sysfs_emit_at(buf, size, 
"Current ppfeatures: 0x%016llx\n", features_enabled); 3250 size += sysfs_emit_at(buf, size, "%-19s %-22s %s\n", 3251 output_title[0], 3252 output_title[1], 3253 output_title[2]); 3254 for (i = 0; i < GNLD_FEATURES_MAX; i++) { 3255 size += sysfs_emit_at(buf, size, "%-19s 0x%016llx %6s\n", 3256 ppfeature_name[i], 3257 1ULL << i, 3258 (features_enabled & (1ULL << i)) ? "Y" : "N"); 3259 } 3260 3261 return size; 3262 } 3263 3264 static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks) 3265 { 3266 struct vega20_hwmgr *data = 3267 (struct vega20_hwmgr *)(hwmgr->backend); 3268 uint64_t features_enabled, features_to_enable, features_to_disable; 3269 int i, ret = 0; 3270 bool enabled; 3271 3272 if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX)) 3273 return -EINVAL; 3274 3275 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); 3276 if (ret) 3277 return ret; 3278 3279 features_to_disable = 3280 features_enabled & ~new_ppfeature_masks; 3281 features_to_enable = 3282 ~features_enabled & new_ppfeature_masks; 3283 3284 pr_debug("features_to_disable 0x%llx\n", features_to_disable); 3285 pr_debug("features_to_enable 0x%llx\n", features_to_enable); 3286 3287 if (features_to_disable) { 3288 ret = vega20_enable_smc_features(hwmgr, false, features_to_disable); 3289 if (ret) 3290 return ret; 3291 } 3292 3293 if (features_to_enable) { 3294 ret = vega20_enable_smc_features(hwmgr, true, features_to_enable); 3295 if (ret) 3296 return ret; 3297 } 3298 3299 /* Update the cached feature enablement state */ 3300 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); 3301 if (ret) 3302 return ret; 3303 3304 for (i = 0; i < GNLD_FEATURES_MAX; i++) { 3305 enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? 
3306 true : false; 3307 data->smu_features[i].enabled = enabled; 3308 } 3309 3310 return 0; 3311 } 3312 3313 static int vega20_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr) 3314 { 3315 struct amdgpu_device *adev = hwmgr->adev; 3316 3317 return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & 3318 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) 3319 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; 3320 } 3321 3322 static int vega20_get_current_pcie_link_width(struct pp_hwmgr *hwmgr) 3323 { 3324 uint32_t width_level; 3325 3326 width_level = vega20_get_current_pcie_link_width_level(hwmgr); 3327 if (width_level > LINK_WIDTH_MAX) 3328 width_level = 0; 3329 3330 return link_width[width_level]; 3331 } 3332 3333 static int vega20_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr) 3334 { 3335 struct amdgpu_device *adev = hwmgr->adev; 3336 3337 return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & 3338 PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) 3339 >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; 3340 } 3341 3342 static int vega20_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr) 3343 { 3344 uint32_t speed_level; 3345 3346 speed_level = vega20_get_current_pcie_link_speed_level(hwmgr); 3347 if (speed_level > LINK_SPEED_MAX) 3348 speed_level = 0; 3349 3350 return link_speed[speed_level]; 3351 } 3352 3353 static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, 3354 enum pp_clock_type type, char *buf) 3355 { 3356 struct vega20_hwmgr *data = 3357 (struct vega20_hwmgr *)(hwmgr->backend); 3358 struct vega20_od8_single_setting *od8_settings = 3359 data->od8_settings.od8_settings_array; 3360 OverDriveTable_t *od_table = 3361 &(data->smc_state_table.overdrive_table); 3362 PPTable_t *pptable = &(data->smc_state_table.pp_table); 3363 struct pp_clock_levels_with_latency clocks; 3364 struct vega20_single_dpm_table *fclk_dpm_table = 3365 &(data->dpm_table.fclk_table); 3366 int i, now, size = 0; 3367 int ret = 0; 3368 uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width; 3369 3370 switch (type) { 3371 case PP_SCLK: 3372 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now); 3373 PP_ASSERT_WITH_CODE(!ret, 3374 "Attempt to get current gfx clk Failed!", 3375 return ret); 3376 3377 if (vega20_get_sclks(hwmgr, &clocks)) { 3378 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", 3379 now / 100); 3380 break; 3381 } 3382 3383 for (i = 0; i < clocks.num_levels; i++) 3384 size += sprintf(buf + size, "%d: %uMhz %s\n", 3385 i, clocks.data[i].clocks_in_khz / 1000, 3386 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); 3387 break; 3388 3389 case PP_MCLK: 3390 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_UCLK, &now); 3391 PP_ASSERT_WITH_CODE(!ret, 3392 "Attempt to get current mclk freq Failed!", 3393 return ret); 3394 3395 if (vega20_get_memclocks(hwmgr, &clocks)) { 3396 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", 3397 now / 100); 3398 break; 3399 } 3400 3401 for (i = 0; i < clocks.num_levels; i++) 3402 size += sprintf(buf + size, "%d: %uMhz %s\n", 3403 i, clocks.data[i].clocks_in_khz / 1000, 3404 (clocks.data[i].clocks_in_khz == now * 10) ? 
"*" : ""); 3405 break; 3406 3407 case PP_SOCCLK: 3408 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_SOCCLK, &now); 3409 PP_ASSERT_WITH_CODE(!ret, 3410 "Attempt to get current socclk freq Failed!", 3411 return ret); 3412 3413 if (vega20_get_socclocks(hwmgr, &clocks)) { 3414 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", 3415 now / 100); 3416 break; 3417 } 3418 3419 for (i = 0; i < clocks.num_levels; i++) 3420 size += sprintf(buf + size, "%d: %uMhz %s\n", 3421 i, clocks.data[i].clocks_in_khz / 1000, 3422 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); 3423 break; 3424 3425 case PP_FCLK: 3426 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_FCLK, &now); 3427 PP_ASSERT_WITH_CODE(!ret, 3428 "Attempt to get current fclk freq Failed!", 3429 return ret); 3430 3431 for (i = 0; i < fclk_dpm_table->count; i++) 3432 size += sprintf(buf + size, "%d: %uMhz %s\n", 3433 i, fclk_dpm_table->dpm_levels[i].value, 3434 fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : ""); 3435 break; 3436 3437 case PP_DCEFCLK: 3438 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_DCEFCLK, &now); 3439 PP_ASSERT_WITH_CODE(!ret, 3440 "Attempt to get current dcefclk freq Failed!", 3441 return ret); 3442 3443 if (vega20_get_dcefclocks(hwmgr, &clocks)) { 3444 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", 3445 now / 100); 3446 break; 3447 } 3448 3449 for (i = 0; i < clocks.num_levels; i++) 3450 size += sprintf(buf + size, "%d: %uMhz %s\n", 3451 i, clocks.data[i].clocks_in_khz / 1000, 3452 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); 3453 break; 3454 3455 case PP_PCIE: 3456 current_gen_speed = 3457 vega20_get_current_pcie_link_speed_level(hwmgr); 3458 current_lane_width = 3459 vega20_get_current_pcie_link_width_level(hwmgr); 3460 for (i = 0; i < NUM_LINK_LEVELS; i++) { 3461 gen_speed = pptable->PcieGenSpeed[i]; 3462 lane_width = pptable->PcieLaneCount[i]; 3463 3464 size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, 3465 (gen_speed == 0) ? "2.5GT/s," : 3466 (gen_speed == 1) ? "5.0GT/s," : 3467 (gen_speed == 2) ? "8.0GT/s," : 3468 (gen_speed == 3) ? "16.0GT/s," : "", 3469 (lane_width == 1) ? "x1" : 3470 (lane_width == 2) ? "x2" : 3471 (lane_width == 3) ? "x4" : 3472 (lane_width == 4) ? "x8" : 3473 (lane_width == 5) ? "x12" : 3474 (lane_width == 6) ? "x16" : "", 3475 pptable->LclkFreq[i], 3476 (current_gen_speed == gen_speed) && 3477 (current_lane_width == lane_width) ? 
3478 "*" : ""); 3479 } 3480 break; 3481 3482 case OD_SCLK: 3483 if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && 3484 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { 3485 size += sprintf(buf + size, "%s:\n", "OD_SCLK"); 3486 size += sprintf(buf + size, "0: %10uMhz\n", 3487 od_table->GfxclkFmin); 3488 size += sprintf(buf + size, "1: %10uMhz\n", 3489 od_table->GfxclkFmax); 3490 } 3491 break; 3492 3493 case OD_MCLK: 3494 if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { 3495 size += sprintf(buf + size, "%s:\n", "OD_MCLK"); 3496 size += sprintf(buf + size, "1: %10uMhz\n", 3497 od_table->UclkFmax); 3498 } 3499 3500 break; 3501 3502 case OD_VDDC_CURVE: 3503 if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id && 3504 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id && 3505 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id && 3506 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && 3507 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && 3508 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { 3509 size += sprintf(buf + size, "%s:\n", "OD_VDDC_CURVE"); 3510 size += sprintf(buf + size, "0: %10uMhz %10dmV\n", 3511 od_table->GfxclkFreq1, 3512 od_table->GfxclkVolt1 / VOLTAGE_SCALE); 3513 size += sprintf(buf + size, "1: %10uMhz %10dmV\n", 3514 od_table->GfxclkFreq2, 3515 od_table->GfxclkVolt2 / VOLTAGE_SCALE); 3516 size += sprintf(buf + size, "2: %10uMhz %10dmV\n", 3517 od_table->GfxclkFreq3, 3518 od_table->GfxclkVolt3 / VOLTAGE_SCALE); 3519 } 3520 3521 break; 3522 3523 case OD_RANGE: 3524 size += sprintf(buf + size, "%s:\n", "OD_RANGE"); 3525 3526 if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && 3527 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { 3528 size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n", 3529 od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value, 3530 od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value); 3531 } 3532 3533 if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { 3534 size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n", 3535 od8_settings[OD8_SETTING_UCLK_FMAX].min_value, 3536 od8_settings[OD8_SETTING_UCLK_FMAX].max_value); 3537 } 3538 3539 if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id && 3540 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id && 3541 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id && 3542 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && 3543 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && 3544 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { 3545 size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n", 3546 od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value, 3547 od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value); 3548 size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n", 3549 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value, 3550 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value); 3551 size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n", 3552 od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value, 3553 od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value); 3554 size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n", 3555 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value, 3556 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value); 3557 size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n", 3558 od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value, 3559 od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value); 3560 size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n", 3561 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value, 3562 
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value); 3563 } 3564 3565 break; 3566 default: 3567 break; 3568 } 3569 return size; 3570 } 3571 3572 static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr, 3573 struct vega20_single_dpm_table *dpm_table) 3574 { 3575 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3576 int ret = 0; 3577 3578 if (data->smu_features[GNLD_DPM_UCLK].enabled) { 3579 PP_ASSERT_WITH_CODE(dpm_table->count > 0, 3580 "[SetUclkToHighestDpmLevel] Dpm table has no entry!", 3581 return -EINVAL); 3582 PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS, 3583 "[SetUclkToHighestDpmLevel] Dpm table has too many entries!", 3584 return -EINVAL); 3585 3586 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3587 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, 3588 PPSMC_MSG_SetHardMinByFreq, 3589 (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level, 3590 NULL)), 3591 "[SetUclkToHighestDpmLevel] Set hard min uclk failed!", 3592 return ret); 3593 } 3594 3595 return ret; 3596 } 3597 3598 static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr) 3599 { 3600 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3601 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.fclk_table); 3602 int ret = 0; 3603 3604 if (data->smu_features[GNLD_DPM_FCLK].enabled) { 3605 PP_ASSERT_WITH_CODE(dpm_table->count > 0, 3606 "[SetFclkToHighestDpmLevel] Dpm table has no entry!", 3607 return -EINVAL); 3608 PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_FCLK_DPM_LEVELS, 3609 "[SetFclkToHighestDpmLevel] Dpm table has too many entries!", 3610 return -EINVAL); 3611 3612 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3613 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, 3614 PPSMC_MSG_SetSoftMinByFreq, 3615 (PPCLK_FCLK << 16) | dpm_table->dpm_state.soft_min_level, 3616 NULL)), 3617 "[SetFclkToHighestDpmLevel] Set soft min fclk failed!", 3618 return ret); 3619 } 3620 3621 return ret; 3622 } 3623 3624 static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr) 3625 { 3626 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3627 int ret = 0; 3628 3629 smum_send_msg_to_smc_with_parameter(hwmgr, 3630 PPSMC_MSG_NumOfDisplays, 0, NULL); 3631 3632 ret = vega20_set_uclk_to_highest_dpm_level(hwmgr, 3633 &data->dpm_table.mem_table); 3634 if (ret) 3635 return ret; 3636 3637 return vega20_set_fclk_to_highest_dpm_level(hwmgr); 3638 } 3639 3640 static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr) 3641 { 3642 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3643 int result = 0; 3644 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table); 3645 3646 if ((data->water_marks_bitmap & WaterMarksExist) && 3647 !(data->water_marks_bitmap & WaterMarksLoaded)) { 3648 result = smum_smc_table_manager(hwmgr, 3649 (uint8_t *)wm_table, TABLE_WATERMARKS, false); 3650 PP_ASSERT_WITH_CODE(!result, 3651 "Failed to update WMTABLE!", 3652 return result); 3653 data->water_marks_bitmap |= WaterMarksLoaded; 3654 } 3655 3656 if ((data->water_marks_bitmap & WaterMarksExist) && 3657 data->smu_features[GNLD_DPM_DCEFCLK].supported && 3658 data->smu_features[GNLD_DPM_SOCCLK].supported) { 3659 result = smum_send_msg_to_smc_with_parameter(hwmgr, 3660 PPSMC_MSG_NumOfDisplays, 3661 hwmgr->display_config->num_display, 3662 NULL); 3663 } 3664 3665 return
result; 3666 } 3667 3668 static int vega20_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) 3669 { 3670 struct vega20_hwmgr *data = 3671 (struct vega20_hwmgr *)(hwmgr->backend); 3672 int ret = 0; 3673 3674 if (data->smu_features[GNLD_DPM_UVD].supported) { 3675 if (data->smu_features[GNLD_DPM_UVD].enabled == enable) { 3676 if (enable) 3677 PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already enabled!\n"); 3678 else 3679 PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already disabled!\n"); 3680 } 3681 3682 ret = vega20_enable_smc_features(hwmgr, 3683 enable, 3684 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap); 3685 PP_ASSERT_WITH_CODE(!ret, 3686 "[EnableDisableUVDDPM] Attempt to Enable/Disable DPM UVD Failed!", 3687 return ret); 3688 data->smu_features[GNLD_DPM_UVD].enabled = enable; 3689 } 3690 3691 return 0; 3692 } 3693 3694 static void vega20_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate) 3695 { 3696 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3697 3698 if (data->vce_power_gated == bgate) 3699 return ; 3700 3701 data->vce_power_gated = bgate; 3702 if (bgate) { 3703 vega20_enable_disable_vce_dpm(hwmgr, !bgate); 3704 amdgpu_device_ip_set_powergating_state(hwmgr->adev, 3705 AMD_IP_BLOCK_TYPE_VCE, 3706 AMD_PG_STATE_GATE); 3707 } else { 3708 amdgpu_device_ip_set_powergating_state(hwmgr->adev, 3709 AMD_IP_BLOCK_TYPE_VCE, 3710 AMD_PG_STATE_UNGATE); 3711 vega20_enable_disable_vce_dpm(hwmgr, !bgate); 3712 } 3713 3714 } 3715 3716 static void vega20_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate) 3717 { 3718 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3719 3720 if (data->uvd_power_gated == bgate) 3721 return ; 3722 3723 data->uvd_power_gated = bgate; 3724 vega20_enable_disable_uvd_dpm(hwmgr, !bgate); 3725 } 3726 3727 static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) 3728 { 3729 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3730 struct vega20_single_dpm_table *dpm_table; 3731 bool vblank_too_short = false; 3732 bool disable_mclk_switching; 3733 bool disable_fclk_switching; 3734 uint32_t i, latency; 3735 3736 disable_mclk_switching = ((1 < hwmgr->display_config->num_display) && 3737 !hwmgr->display_config->multi_monitor_in_sync) || 3738 vblank_too_short; 3739 latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency; 3740 3741 /* gfxclk */ 3742 dpm_table = &(data->dpm_table.gfx_table); 3743 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3744 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3745 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3746 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3747 3748 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3749 if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) { 3750 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value; 3751 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value; 3752 } 3753 3754 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { 3755 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3756 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value; 3757 } 3758 3759 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 3760 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3761 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 
3762 } 3763 } 3764 3765 /* memclk */ 3766 dpm_table = &(data->dpm_table.mem_table); 3767 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3768 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3769 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3770 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3771 3772 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3773 if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) { 3774 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value; 3775 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value; 3776 } 3777 3778 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { 3779 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3780 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value; 3781 } 3782 3783 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 3784 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3785 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3786 } 3787 } 3788 3789 /* honour DAL's UCLK Hardmin */ 3790 if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100)) 3791 dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100; 3792 3793 /* Hardmin is dependent on displayconfig */ 3794 if (disable_mclk_switching) { 3795 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3796 for (i = 0; i < data->mclk_latency_table.count - 1; i++) { 3797 if (data->mclk_latency_table.entries[i].latency <= latency) { 3798 if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) { 3799 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value; 3800 break; 3801 } 3802 } 3803 } 3804 } 3805 3806 if (hwmgr->display_config->nb_pstate_switch_disable) 3807 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3808 3809 if ((disable_mclk_switching && 3810 (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) || 3811 hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value) 3812 disable_fclk_switching = true; 3813 else 3814 disable_fclk_switching = false; 3815 3816 /* fclk */ 3817 dpm_table = &(data->dpm_table.fclk_table); 3818 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3819 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3820 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3821 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3822 if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching) 3823 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3824 3825 /* vclk */ 3826 dpm_table = &(data->dpm_table.vclk_table); 3827 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3828 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3829 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3830 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3831 3832 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3833 if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) { 3834 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value; 3835 
dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value; 3836 } 3837 3838 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 3839 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3840 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3841 } 3842 } 3843 3844 /* dclk */ 3845 dpm_table = &(data->dpm_table.dclk_table); 3846 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3847 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3848 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3849 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3850 3851 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3852 if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) { 3853 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value; 3854 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value; 3855 } 3856 3857 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 3858 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3859 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3860 } 3861 } 3862 3863 /* socclk */ 3864 dpm_table = &(data->dpm_table.soc_table); 3865 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3866 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3867 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3868 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3869 3870 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3871 if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) { 3872 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value; 3873 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value; 3874 } 3875 3876 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 3877 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3878 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3879 } 3880 } 3881 3882 /* eclk */ 3883 dpm_table = &(data->dpm_table.eclk_table); 3884 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3885 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3886 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3887 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3888 3889 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3890 if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) { 3891 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value; 3892 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value; 3893 } 3894 3895 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 3896 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3897 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3898 } 3899 } 3900 3901 return 0; 3902 } 3903 3904 static bool 3905 vega20_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) 3906 { 3907 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3908 bool is_update_required = false; 3909 3910 if 
(data->display_timing.num_existing_displays != 3911 hwmgr->display_config->num_display) 3912 is_update_required = true; 3913 3914 if (data->registry_data.gfx_clk_deep_sleep_support && 3915 (data->display_timing.min_clock_in_sr != 3916 hwmgr->display_config->min_core_set_clock_in_sr)) 3917 is_update_required = true; 3918 3919 return is_update_required; 3920 } 3921 3922 static int vega20_disable_dpm_tasks(struct pp_hwmgr *hwmgr) 3923 { 3924 int ret = 0; 3925 3926 ret = vega20_disable_all_smu_features(hwmgr); 3927 PP_ASSERT_WITH_CODE(!ret, 3928 "[DisableDpmTasks] Failed to disable all smu features!", 3929 return ret); 3930 3931 return 0; 3932 } 3933 3934 static int vega20_power_off_asic(struct pp_hwmgr *hwmgr) 3935 { 3936 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3937 int result; 3938 3939 result = vega20_disable_dpm_tasks(hwmgr); 3940 PP_ASSERT_WITH_CODE((0 == result), 3941 "[PowerOffAsic] Failed to disable DPM!", 3942 ); 3943 data->water_marks_bitmap &= ~(WaterMarksLoaded); 3944 3945 return result; 3946 } 3947 3948 static int conv_power_profile_to_pplib_workload(int power_profile) 3949 { 3950 int pplib_workload = 0; 3951 3952 switch (power_profile) { 3953 case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT: 3954 pplib_workload = WORKLOAD_DEFAULT_BIT; 3955 break; 3956 case PP_SMC_POWER_PROFILE_FULLSCREEN3D: 3957 pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT; 3958 break; 3959 case PP_SMC_POWER_PROFILE_POWERSAVING: 3960 pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT; 3961 break; 3962 case PP_SMC_POWER_PROFILE_VIDEO: 3963 pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT; 3964 break; 3965 case PP_SMC_POWER_PROFILE_VR: 3966 pplib_workload = WORKLOAD_PPLIB_VR_BIT; 3967 break; 3968 case PP_SMC_POWER_PROFILE_COMPUTE: 3969 pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT; 3970 break; 3971 case PP_SMC_POWER_PROFILE_CUSTOM: 3972 pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT; 3973 break; 3974 } 3975 3976 return pplib_workload; 3977 } 3978 3979 static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) 3980 { 3981 DpmActivityMonitorCoeffInt_t activity_monitor; 3982 uint32_t i, size = 0; 3983 uint16_t workload_type = 0; 3984 static const char *title[] = { 3985 "PROFILE_INDEX(NAME)", 3986 "CLOCK_TYPE(NAME)", 3987 "FPS", 3988 "UseRlcBusy", 3989 "MinActiveFreqType", 3990 "MinActiveFreq", 3991 "BoosterFreqType", 3992 "BoosterFreq", 3993 "PD_Data_limit_c", 3994 "PD_Data_error_coeff", 3995 "PD_Data_error_rate_coeff"}; 3996 int result = 0; 3997 3998 if (!buf) 3999 return -EINVAL; 4000 4001 phm_get_sysfs_buf(&buf, &size); 4002 4003 size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n", 4004 title[0], title[1], title[2], title[3], title[4], title[5], 4005 title[6], title[7], title[8], title[9], title[10]); 4006 4007 for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { 4008 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 4009 workload_type = conv_power_profile_to_pplib_workload(i); 4010 result = vega20_get_activity_monitor_coeff(hwmgr, 4011 (uint8_t *)(&activity_monitor), workload_type); 4012 PP_ASSERT_WITH_CODE(!result, 4013 "[GetPowerProfile] Failed to get activity monitor!", 4014 return result); 4015 4016 size += sysfs_emit_at(buf, size, "%2d %14s%s:\n", 4017 i, amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? 
"*" : " "); 4018 4019 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", 4020 " ", 4021 0, 4022 "GFXCLK", 4023 activity_monitor.Gfx_FPS, 4024 activity_monitor.Gfx_UseRlcBusy, 4025 activity_monitor.Gfx_MinActiveFreqType, 4026 activity_monitor.Gfx_MinActiveFreq, 4027 activity_monitor.Gfx_BoosterFreqType, 4028 activity_monitor.Gfx_BoosterFreq, 4029 activity_monitor.Gfx_PD_Data_limit_c, 4030 activity_monitor.Gfx_PD_Data_error_coeff, 4031 activity_monitor.Gfx_PD_Data_error_rate_coeff); 4032 4033 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", 4034 " ", 4035 1, 4036 "SOCCLK", 4037 activity_monitor.Soc_FPS, 4038 activity_monitor.Soc_UseRlcBusy, 4039 activity_monitor.Soc_MinActiveFreqType, 4040 activity_monitor.Soc_MinActiveFreq, 4041 activity_monitor.Soc_BoosterFreqType, 4042 activity_monitor.Soc_BoosterFreq, 4043 activity_monitor.Soc_PD_Data_limit_c, 4044 activity_monitor.Soc_PD_Data_error_coeff, 4045 activity_monitor.Soc_PD_Data_error_rate_coeff); 4046 4047 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", 4048 " ", 4049 2, 4050 "UCLK", 4051 activity_monitor.Mem_FPS, 4052 activity_monitor.Mem_UseRlcBusy, 4053 activity_monitor.Mem_MinActiveFreqType, 4054 activity_monitor.Mem_MinActiveFreq, 4055 activity_monitor.Mem_BoosterFreqType, 4056 activity_monitor.Mem_BoosterFreq, 4057 activity_monitor.Mem_PD_Data_limit_c, 4058 activity_monitor.Mem_PD_Data_error_coeff, 4059 activity_monitor.Mem_PD_Data_error_rate_coeff); 4060 4061 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", 4062 " ", 4063 3, 4064 "FCLK", 4065 activity_monitor.Fclk_FPS, 4066 activity_monitor.Fclk_UseRlcBusy, 4067 activity_monitor.Fclk_MinActiveFreqType, 4068 activity_monitor.Fclk_MinActiveFreq, 4069 activity_monitor.Fclk_BoosterFreqType, 4070 activity_monitor.Fclk_BoosterFreq, 4071 activity_monitor.Fclk_PD_Data_limit_c, 4072 activity_monitor.Fclk_PD_Data_error_coeff, 4073 activity_monitor.Fclk_PD_Data_error_rate_coeff); 4074 } 4075 4076 return size; 4077 } 4078 4079 static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size) 4080 { 4081 DpmActivityMonitorCoeffInt_t activity_monitor; 4082 int workload_type, result = 0; 4083 uint32_t power_profile_mode = input[size]; 4084 4085 if (power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { 4086 pr_err("Invalid power profile mode %d\n", power_profile_mode); 4087 return -EINVAL; 4088 } 4089 4090 if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { 4091 struct vega20_hwmgr *data = 4092 (struct vega20_hwmgr *)(hwmgr->backend); 4093 if (size == 0 && !data->is_custom_profile_set) 4094 return -EINVAL; 4095 if (size < 10 && size != 0) 4096 return -EINVAL; 4097 4098 result = vega20_get_activity_monitor_coeff(hwmgr, 4099 (uint8_t *)(&activity_monitor), 4100 WORKLOAD_PPLIB_CUSTOM_BIT); 4101 PP_ASSERT_WITH_CODE(!result, 4102 "[SetPowerProfile] Failed to get activity monitor!", 4103 return result); 4104 4105 /* If size==0, then we want to apply the already-configured 4106 * CUSTOM profile again. 
Just apply it, since we checked its 4107 * validity above 4108 */ 4109 if (size == 0) 4110 goto out; 4111 4112 switch (input[0]) { 4113 case 0: /* Gfxclk */ 4114 activity_monitor.Gfx_FPS = input[1]; 4115 activity_monitor.Gfx_UseRlcBusy = input[2]; 4116 activity_monitor.Gfx_MinActiveFreqType = input[3]; 4117 activity_monitor.Gfx_MinActiveFreq = input[4]; 4118 activity_monitor.Gfx_BoosterFreqType = input[5]; 4119 activity_monitor.Gfx_BoosterFreq = input[6]; 4120 activity_monitor.Gfx_PD_Data_limit_c = input[7]; 4121 activity_monitor.Gfx_PD_Data_error_coeff = input[8]; 4122 activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9]; 4123 break; 4124 case 1: /* Socclk */ 4125 activity_monitor.Soc_FPS = input[1]; 4126 activity_monitor.Soc_UseRlcBusy = input[2]; 4127 activity_monitor.Soc_MinActiveFreqType = input[3]; 4128 activity_monitor.Soc_MinActiveFreq = input[4]; 4129 activity_monitor.Soc_BoosterFreqType = input[5]; 4130 activity_monitor.Soc_BoosterFreq = input[6]; 4131 activity_monitor.Soc_PD_Data_limit_c = input[7]; 4132 activity_monitor.Soc_PD_Data_error_coeff = input[8]; 4133 activity_monitor.Soc_PD_Data_error_rate_coeff = input[9]; 4134 break; 4135 case 2: /* Uclk */ 4136 activity_monitor.Mem_FPS = input[1]; 4137 activity_monitor.Mem_UseRlcBusy = input[2]; 4138 activity_monitor.Mem_MinActiveFreqType = input[3]; 4139 activity_monitor.Mem_MinActiveFreq = input[4]; 4140 activity_monitor.Mem_BoosterFreqType = input[5]; 4141 activity_monitor.Mem_BoosterFreq = input[6]; 4142 activity_monitor.Mem_PD_Data_limit_c = input[7]; 4143 activity_monitor.Mem_PD_Data_error_coeff = input[8]; 4144 activity_monitor.Mem_PD_Data_error_rate_coeff = input[9]; 4145 break; 4146 case 3: /* Fclk */ 4147 activity_monitor.Fclk_FPS = input[1]; 4148 activity_monitor.Fclk_UseRlcBusy = input[2]; 4149 activity_monitor.Fclk_MinActiveFreqType = input[3]; 4150 activity_monitor.Fclk_MinActiveFreq = input[4]; 4151 activity_monitor.Fclk_BoosterFreqType = input[5]; 4152 activity_monitor.Fclk_BoosterFreq = input[6]; 4153 activity_monitor.Fclk_PD_Data_limit_c = input[7]; 4154 activity_monitor.Fclk_PD_Data_error_coeff = input[8]; 4155 activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9]; 4156 break; 4157 } 4158 4159 result = vega20_set_activity_monitor_coeff(hwmgr, 4160 (uint8_t *)(&activity_monitor), 4161 WORKLOAD_PPLIB_CUSTOM_BIT); 4162 data->is_custom_profile_set = true; 4163 PP_ASSERT_WITH_CODE(!result, 4164 "[SetPowerProfile] Failed to set activity monitor!", 4165 return result); 4166 } 4167 4168 out: 4169 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 4170 workload_type = 4171 conv_power_profile_to_pplib_workload(power_profile_mode); 4172 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask, 4173 1 << workload_type, 4174 NULL); 4175 4176 hwmgr->power_profile_mode = power_profile_mode; 4177 4178 return 0; 4179 } 4180 4181 static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr, 4182 uint32_t virtual_addr_low, 4183 uint32_t virtual_addr_hi, 4184 uint32_t mc_addr_low, 4185 uint32_t mc_addr_hi, 4186 uint32_t size) 4187 { 4188 smum_send_msg_to_smc_with_parameter(hwmgr, 4189 PPSMC_MSG_SetSystemVirtualDramAddrHigh, 4190 virtual_addr_hi, 4191 NULL); 4192 smum_send_msg_to_smc_with_parameter(hwmgr, 4193 PPSMC_MSG_SetSystemVirtualDramAddrLow, 4194 virtual_addr_low, 4195 NULL); 4196 smum_send_msg_to_smc_with_parameter(hwmgr, 4197 PPSMC_MSG_DramLogSetDramAddrHigh, 4198 mc_addr_hi, 4199 NULL); 4200 4201 smum_send_msg_to_smc_with_parameter(hwmgr, 4202 PPSMC_MSG_DramLogSetDramAddrLow, 4203 mc_addr_low, 
4204 NULL); 4205 4206 smum_send_msg_to_smc_with_parameter(hwmgr, 4207 PPSMC_MSG_DramLogSetDramSize, 4208 size, 4209 NULL); 4210 return 0; 4211 } 4212 4213 static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, 4214 struct PP_TemperatureRange *thermal_data) 4215 { 4216 struct vega20_hwmgr *data = 4217 (struct vega20_hwmgr *)(hwmgr->backend); 4218 PPTable_t *pp_table = &(data->smc_state_table.pp_table); 4219 4220 memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange)); 4221 4222 thermal_data->max = pp_table->TedgeLimit * 4223 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 4224 thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) * 4225 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 4226 thermal_data->hotspot_crit_max = pp_table->ThotspotLimit * 4227 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 4228 thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) * 4229 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 4230 thermal_data->mem_crit_max = pp_table->ThbmLimit * 4231 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 4232 thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)* 4233 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 4234 4235 return 0; 4236 } 4237 4238 static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire) 4239 { 4240 int res; 4241 4242 /* I2C bus access can happen very early, when SMU not loaded yet */ 4243 if (!vega20_is_smc_ram_running(hwmgr)) 4244 return 0; 4245 4246 res = smum_send_msg_to_smc_with_parameter(hwmgr, 4247 (acquire ? 4248 PPSMC_MSG_RequestI2CBus : 4249 PPSMC_MSG_ReleaseI2CBus), 4250 0, 4251 NULL); 4252 4253 PP_ASSERT_WITH_CODE(!res, "[SmuI2CAccessBus] Failed to access bus!", return res); 4254 return res; 4255 } 4256 4257 static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr, 4258 enum pp_df_cstate state) 4259 { 4260 int ret; 4261 4262 /* PPSMC_MSG_DFCstateControl is supported with 40.50 and later fws */ 4263 if (hwmgr->smu_version < 0x283200) { 4264 pr_err("Df cstate control is supported with 40.50 and later SMC fw!\n"); 4265 return -EINVAL; 4266 } 4267 4268 ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state, 4269 NULL); 4270 if (ret) 4271 pr_err("SetDfCstate failed!\n"); 4272 4273 return ret; 4274 } 4275 4276 static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr, 4277 uint32_t pstate) 4278 { 4279 int ret; 4280 4281 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 4282 PPSMC_MSG_SetXgmiMode, 4283 pstate ? 
XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3, 4284 NULL); 4285 if (ret) 4286 pr_err("SetXgmiPstate failed!\n"); 4287 4288 return ret; 4289 } 4290 4291 static void vega20_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics) 4292 { 4293 memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0)); 4294 4295 gpu_metrics->common_header.structure_size = 4296 sizeof(struct gpu_metrics_v1_0); 4297 gpu_metrics->common_header.format_revision = 1; 4298 gpu_metrics->common_header.content_revision = 0; 4299 4300 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 4301 } 4302 4303 static ssize_t vega20_get_gpu_metrics(struct pp_hwmgr *hwmgr, 4304 void **table) 4305 { 4306 struct vega20_hwmgr *data = 4307 (struct vega20_hwmgr *)(hwmgr->backend); 4308 struct gpu_metrics_v1_0 *gpu_metrics = 4309 &data->gpu_metrics_table; 4310 SmuMetrics_t metrics; 4311 uint32_t fan_speed_rpm; 4312 int ret; 4313 4314 ret = vega20_get_metrics_table(hwmgr, &metrics, true); 4315 if (ret) 4316 return ret; 4317 4318 vega20_init_gpu_metrics_v1_0(gpu_metrics); 4319 4320 gpu_metrics->temperature_edge = metrics.TemperatureEdge; 4321 gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot; 4322 gpu_metrics->temperature_mem = metrics.TemperatureHBM; 4323 gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx; 4324 gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc; 4325 gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0; 4326 4327 gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity; 4328 gpu_metrics->average_umc_activity = metrics.AverageUclkActivity; 4329 4330 gpu_metrics->average_socket_power = metrics.AverageSocketPower; 4331 4332 gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency; 4333 gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency; 4334 gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency; 4335 4336 gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK]; 4337 gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK]; 4338 gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK]; 4339 gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK]; 4340 gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK]; 4341 4342 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 4343 4344 vega20_fan_ctrl_get_fan_speed_rpm(hwmgr, &fan_speed_rpm); 4345 gpu_metrics->current_fan_speed = (uint16_t)fan_speed_rpm; 4346 4347 gpu_metrics->pcie_link_width = 4348 vega20_get_current_pcie_link_width(hwmgr); 4349 gpu_metrics->pcie_link_speed = 4350 vega20_get_current_pcie_link_speed(hwmgr); 4351 4352 *table = (void *)gpu_metrics; 4353 4354 return sizeof(struct gpu_metrics_v1_0); 4355 } 4356 4357 static const struct pp_hwmgr_func vega20_hwmgr_funcs = { 4358 /* init/fini related */ 4359 .backend_init = vega20_hwmgr_backend_init, 4360 .backend_fini = vega20_hwmgr_backend_fini, 4361 .asic_setup = vega20_setup_asic_task, 4362 .power_off_asic = vega20_power_off_asic, 4363 .dynamic_state_management_enable = vega20_enable_dpm_tasks, 4364 .dynamic_state_management_disable = vega20_disable_dpm_tasks, 4365 /* power state related */ 4366 .apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules, 4367 .pre_display_config_changed = vega20_pre_display_configuration_changed_task, 4368 .display_config_changed = vega20_display_configuration_changed_task, 4369 .check_smc_update_required_for_display_configuration = 4370 vega20_check_smc_update_required_for_display_configuration, 4371 .notify_smc_display_config_after_ps_adjustment = 4372 
vega20_notify_smc_display_config_after_ps_adjustment, 4373 /* export to DAL */ 4374 .get_sclk = vega20_dpm_get_sclk, 4375 .get_mclk = vega20_dpm_get_mclk, 4376 .get_dal_power_level = vega20_get_dal_power_level, 4377 .get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency, 4378 .get_clock_by_type_with_voltage = vega20_get_clock_by_type_with_voltage, 4379 .set_watermarks_for_clocks_ranges = vega20_set_watermarks_for_clocks_ranges, 4380 .display_clock_voltage_request = vega20_display_clock_voltage_request, 4381 .get_performance_level = vega20_get_performance_level, 4382 /* UMD pstate, profile related */ 4383 .force_dpm_level = vega20_dpm_force_dpm_level, 4384 .get_power_profile_mode = vega20_get_power_profile_mode, 4385 .set_power_profile_mode = vega20_set_power_profile_mode, 4386 /* od related */ 4387 .set_power_limit = vega20_set_power_limit, 4388 .get_sclk_od = vega20_get_sclk_od, 4389 .set_sclk_od = vega20_set_sclk_od, 4390 .get_mclk_od = vega20_get_mclk_od, 4391 .set_mclk_od = vega20_set_mclk_od, 4392 .odn_edit_dpm_table = vega20_odn_edit_dpm_table, 4393 /* for sysfs to retrieve/set gfxclk/memclk */ 4394 .force_clock_level = vega20_force_clock_level, 4395 .print_clock_levels = vega20_print_clock_levels, 4396 .read_sensor = vega20_read_sensor, 4397 .get_ppfeature_status = vega20_get_ppfeature_status, 4398 .set_ppfeature_status = vega20_set_ppfeature_status, 4399 /* powergate related */ 4400 .powergate_uvd = vega20_power_gate_uvd, 4401 .powergate_vce = vega20_power_gate_vce, 4402 /* thermal related */ 4403 .start_thermal_controller = vega20_start_thermal_controller, 4404 .stop_thermal_controller = vega20_thermal_stop_thermal_controller, 4405 .get_thermal_temperature_range = vega20_get_thermal_temperature_range, 4406 .register_irq_handlers = smu9_register_irq_handlers, 4407 .disable_smc_firmware_ctf = vega20_thermal_disable_alert, 4408 /* fan control related */ 4409 .get_fan_speed_pwm = vega20_fan_ctrl_get_fan_speed_pwm, 4410 .set_fan_speed_pwm = vega20_fan_ctrl_set_fan_speed_pwm, 4411 .get_fan_speed_info = vega20_fan_ctrl_get_fan_speed_info, 4412 .get_fan_speed_rpm = vega20_fan_ctrl_get_fan_speed_rpm, 4413 .set_fan_speed_rpm = vega20_fan_ctrl_set_fan_speed_rpm, 4414 .get_fan_control_mode = vega20_get_fan_control_mode, 4415 .set_fan_control_mode = vega20_set_fan_control_mode, 4416 /* smu memory related */ 4417 .notify_cac_buffer_info = vega20_notify_cac_buffer_info, 4418 .enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost, 4419 /* BACO related */ 4420 .get_asic_baco_capability = vega20_baco_get_capability, 4421 .get_asic_baco_state = vega20_baco_get_state, 4422 .set_asic_baco_state = vega20_baco_set_state, 4423 .set_mp1_state = vega20_set_mp1_state, 4424 .smu_i2c_bus_access = vega20_smu_i2c_bus_access, 4425 .set_df_cstate = vega20_set_df_cstate, 4426 .set_xgmi_pstate = vega20_set_xgmi_pstate, 4427 .get_gpu_metrics = vega20_get_gpu_metrics, 4428 }; 4429 4430 int vega20_hwmgr_init(struct pp_hwmgr *hwmgr) 4431 { 4432 hwmgr->hwmgr_func = &vega20_hwmgr_funcs; 4433 hwmgr->pptable_func = &vega20_pptable_funcs; 4434 4435 return 0; 4436 } 4437
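/*
 * Editor's note -- illustrative sketch only, not part of the driver: the
 * vega20_hwmgr_funcs table above is consumed by the generic powerplay hwmgr
 * core once vega20_hwmgr_init() has installed it. Assuming a struct pp_hwmgr
 * instance allocated by that core, the dispatch looks roughly like:
 *
 *	struct pp_hwmgr *hwmgr = ...;			// provided by the hwmgr core
 *
 *	vega20_hwmgr_init(hwmgr);			// installs hwmgr_func/pptable_func
 *	hwmgr->hwmgr_func->backend_init(hwmgr);		// -> vega20_hwmgr_backend_init()
 *	hwmgr->hwmgr_func->asic_setup(hwmgr);		// -> vega20_setup_asic_task()
 *
 * The exact ordering and error handling live in the shared hwmgr code; this
 * note only shows how the function-pointer table is meant to be dispatched.
 */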