1 /* 2 * Copyright 2023 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #define SWSMU_CODE_LAYER_L2 25 26 #include <linux/firmware.h> 27 #include <linux/pci.h> 28 #include <linux/i2c.h> 29 #include "amdgpu.h" 30 #include "amdgpu_smu.h" 31 #include "atomfirmware.h" 32 #include "amdgpu_atomfirmware.h" 33 #include "amdgpu_atombios.h" 34 #include "smu_v14_0.h" 35 #include "smu14_driver_if_v14_0.h" 36 #include "soc15_common.h" 37 #include "atom.h" 38 #include "smu_v14_0_2_ppt.h" 39 #include "smu_v14_0_2_pptable.h" 40 #include "smu_v14_0_2_ppsmc.h" 41 #include "mp/mp_14_0_2_offset.h" 42 #include "mp/mp_14_0_2_sh_mask.h" 43 44 #include "smu_cmn.h" 45 #include "amdgpu_ras.h" 46 47 /* 48 * DO NOT use these for err/warn/info/debug messages. 49 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 50 * They are more MGPU friendly. 
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

/* Recover the amdgpu_device that embeds a given &adev->pm.smu_i2c member. */
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))

/* Turn a FEATURE_*_BIT index into its 64-bit feature-mask bit. */
#define FEATURE_MASK(feature) (1ULL << feature)
/*
 * DPM-related features checked by smu_v14_0_2_is_dpm_running(): if any of
 * these is reported enabled by PMFW, DPM is considered to be running.
 */
#define SMC_DPM_FEATURE ( \
	FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_FCLK_BIT))

/* Size of the MP0<->MP1 shared data region carrying the combo pptable. */
#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000
#define DEBUGSMC_MSG_Mode1Reset 2
#define LINK_SPEED_MAX 3

/*
 * Driver-local indices naming one overdrive setting when querying its
 * [min, max] limits (see smu_v14_0_2_get_od_setting_limits()).
 */
#define PP_OD_FEATURE_GFXCLK_FMIN 0
#define PP_OD_FEATURE_GFXCLK_FMAX 1
#define PP_OD_FEATURE_UCLK_FMIN 2
#define PP_OD_FEATURE_UCLK_FMAX 3
#define PP_OD_FEATURE_GFX_VF_CURVE 4
#define PP_OD_FEATURE_FAN_CURVE_TEMP 5
#define PP_OD_FEATURE_FAN_CURVE_PWM 6
#define PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT 7
#define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8
#define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9
#define PP_OD_FEATURE_FAN_MINIMUM_PWM 10

/*
 * Generic SMU_MSG_* -> ASIC PPSMC_MSG_* mapping. The third MSG_MAP() column
 * is a per-message flag consumed by smu_cmn (presumably the SRIOV/VF-allowed
 * bit — confirm against the MSG_MAP() definition in smu_cmn).
 */
static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage,			1),
	MSG_MAP(GetSmuVersion,			PPSMC_MSG_GetSmuVersion,		1),
	MSG_MAP(GetDriverIfVersion,		PPSMC_MSG_GetDriverIfVersion,		1),
	MSG_MAP(SetAllowedFeaturesMaskLow,	PPSMC_MSG_SetAllowedFeaturesMaskLow,	0),
	MSG_MAP(SetAllowedFeaturesMaskHigh,	PPSMC_MSG_SetAllowedFeaturesMaskHigh,	0),
	MSG_MAP(EnableAllSmuFeatures,		PPSMC_MSG_EnableAllSmuFeatures,		0),
	MSG_MAP(DisableAllSmuFeatures,		PPSMC_MSG_DisableAllSmuFeatures,	0),
	MSG_MAP(EnableSmuFeaturesLow,		PPSMC_MSG_EnableSmuFeaturesLow,		1),
	MSG_MAP(EnableSmuFeaturesHigh,		PPSMC_MSG_EnableSmuFeaturesHigh,	1),
	MSG_MAP(DisableSmuFeaturesLow,		PPSMC_MSG_DisableSmuFeaturesLow,	1),
	MSG_MAP(DisableSmuFeaturesHigh,		PPSMC_MSG_DisableSmuFeaturesHigh,	1),
	MSG_MAP(GetEnabledSmuFeaturesLow,	PPSMC_MSG_GetRunningSmuFeaturesLow,	1),
	MSG_MAP(GetEnabledSmuFeaturesHigh,	PPSMC_MSG_GetRunningSmuFeaturesHigh,	1),
	MSG_MAP(SetWorkloadMask,		PPSMC_MSG_SetWorkloadMask,		1),
	MSG_MAP(SetPptLimit,			PPSMC_MSG_SetPptLimit,			0),
	MSG_MAP(SetDriverDramAddrHigh,		PPSMC_MSG_SetDriverDramAddrHigh,	1),
	MSG_MAP(SetDriverDramAddrLow,		PPSMC_MSG_SetDriverDramAddrLow,		1),
	MSG_MAP(SetToolsDramAddrHigh,		PPSMC_MSG_SetToolsDramAddrHigh,		0),
	MSG_MAP(SetToolsDramAddrLow,		PPSMC_MSG_SetToolsDramAddrLow,		0),
	MSG_MAP(TransferTableSmu2Dram,		PPSMC_MSG_TransferTableSmu2Dram,	1),
	MSG_MAP(TransferTableDram2Smu,		PPSMC_MSG_TransferTableDram2Smu,	0),
	MSG_MAP(UseDefaultPPTable,		PPSMC_MSG_UseDefaultPPTable,		0),
	MSG_MAP(RunDcBtc,			PPSMC_MSG_RunDcBtc,			0),
	MSG_MAP(EnterBaco,			PPSMC_MSG_EnterBaco,			0),
	MSG_MAP(ExitBaco,			PPSMC_MSG_ExitBaco,			0),
	MSG_MAP(SetSoftMinByFreq,		PPSMC_MSG_SetSoftMinByFreq,		1),
	MSG_MAP(SetSoftMaxByFreq,		PPSMC_MSG_SetSoftMaxByFreq,		1),
	MSG_MAP(SetHardMinByFreq,		PPSMC_MSG_SetHardMinByFreq,		1),
	MSG_MAP(SetHardMaxByFreq,		PPSMC_MSG_SetHardMaxByFreq,		0),
	MSG_MAP(GetMinDpmFreq,			PPSMC_MSG_GetMinDpmFreq,		1),
	MSG_MAP(GetMaxDpmFreq,			PPSMC_MSG_GetMaxDpmFreq,		1),
	MSG_MAP(GetDpmFreqByIndex,		PPSMC_MSG_GetDpmFreqByIndex,		1),
	MSG_MAP(PowerUpVcn,			PPSMC_MSG_PowerUpVcn,			0),
	MSG_MAP(PowerDownVcn,			PPSMC_MSG_PowerDownVcn,			0),
	MSG_MAP(PowerUpJpeg,			PPSMC_MSG_PowerUpJpeg,			0),
	MSG_MAP(PowerDownJpeg,			PPSMC_MSG_PowerDownJpeg,		0),
	MSG_MAP(GetDcModeMaxDpmFreq,		PPSMC_MSG_GetDcModeMaxDpmFreq,		1),
	MSG_MAP(OverridePcieParameters,		PPSMC_MSG_OverridePcieParameters,	0),
	MSG_MAP(DramLogSetDramAddrHigh,		PPSMC_MSG_DramLogSetDramAddrHigh,	0),
	MSG_MAP(DramLogSetDramAddrLow,		PPSMC_MSG_DramLogSetDramAddrLow,	0),
	MSG_MAP(DramLogSetDramSize,		PPSMC_MSG_DramLogSetDramSize,		0),
	MSG_MAP(AllowGfxOff,			PPSMC_MSG_AllowGfxOff,			0),
	MSG_MAP(DisallowGfxOff,			PPSMC_MSG_DisallowGfxOff,		0),
	MSG_MAP(SetMGpuFanBoostLimitRpm,	PPSMC_MSG_SetMGpuFanBoostLimitRpm,	0),
	MSG_MAP(GetPptLimit,			PPSMC_MSG_GetPptLimit,			0),
	MSG_MAP(NotifyPowerSource,		PPSMC_MSG_NotifyPowerSource,		0),
	MSG_MAP(PrepareMp1ForUnload,		PPSMC_MSG_PrepareMp1ForUnload,		0),
	MSG_MAP(DFCstateControl,		PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
	MSG_MAP(ArmD3,				PPSMC_MSG_ArmD3,			0),
	MSG_MAP(SetNumBadMemoryPagesRetired,	PPSMC_MSG_SetNumBadMemoryPagesRetired,	0),
	MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
		PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel,			0),
	MSG_MAP(AllowIHHostInterrupt,		PPSMC_MSG_AllowIHHostInterrupt,		0),
	MSG_MAP(ReenableAcDcInterrupt,		PPSMC_MSG_ReenableAcDcInterrupt,	0),
};

/*
 * Generic SMU_CLK_* -> ASIC PPCLK_* mapping. Note SCLK/GFXCLK and
 * MCLK/UCLK alias the same ASIC clocks; DCEFCLK maps to PPCLK_DCFCLK.
 */
static struct cmn2asic_mapping smu_v14_0_2_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(GFXCLK,		PPCLK_GFXCLK),
	CLK_MAP(SCLK,		PPCLK_GFXCLK),
	CLK_MAP(SOCCLK,		PPCLK_SOCCLK),
	CLK_MAP(FCLK,		PPCLK_FCLK),
	CLK_MAP(UCLK,		PPCLK_UCLK),
	CLK_MAP(MCLK,		PPCLK_UCLK),
	CLK_MAP(VCLK,		PPCLK_VCLK_0),
	CLK_MAP(DCLK,		PPCLK_DCLK_0),
	CLK_MAP(DCEFCLK,	PPCLK_DCFCLK),
};

/*
 * Generic SMU_FEATURE_* -> ASIC FEATURE_*_BIT mapping. The explicit
 * designated entries at the end alias several generic features onto
 * shared ASIC bits (VCLK/DCLK DPM both ride FEATURE_MM_DPM_BIT, PPT
 * rides FEATURE_THROTTLERS_BIT).
 */
static struct cmn2asic_mapping smu_v14_0_2_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(FW_DATA_READ),
	FEA_MAP(DPM_GFXCLK),
	FEA_MAP(DPM_GFX_POWER_OPTIMIZER),
	FEA_MAP(DPM_UCLK),
	FEA_MAP(DPM_FCLK),
	FEA_MAP(DPM_SOCCLK),
	FEA_MAP(DPM_LINK),
	FEA_MAP(DPM_DCN),
	FEA_MAP(VMEMP_SCALING),
	FEA_MAP(VDDIO_MEM_SCALING),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_DCFCLK),
	FEA_MAP(DS_UCLK),
	FEA_MAP(GFX_ULV),
	FEA_MAP(FW_DSTATE),
	FEA_MAP(GFXOFF),
	FEA_MAP(BACO),
	FEA_MAP(MM_DPM),
	FEA_MAP(SOC_MPCLK_DS),
	FEA_MAP(BACO_MPCLK_DS),
	FEA_MAP(THROTTLERS),
	FEA_MAP(SMARTSHIFT),
	FEA_MAP(GTHR),
	FEA_MAP(ACDC),
	FEA_MAP(VR0HOT),
	FEA_MAP(FW_CTF),
	FEA_MAP(FAN_CONTROL),
	FEA_MAP(GFX_DCS),
	FEA_MAP(GFX_READ_MARGIN),
	FEA_MAP(LED_DISPLAY),
	FEA_MAP(GFXCLK_SPREAD_SPECTRUM),
	FEA_MAP(OUT_OF_BAND_MONITOR),
	FEA_MAP(OPTIMIZED_VMIN),
	FEA_MAP(GFX_IMU),
	FEA_MAP(BOOT_TIME_CAL),
	FEA_MAP(GFX_PCC_DFLL),
	FEA_MAP(SOC_CG),
	FEA_MAP(DF_CSTATE),
	FEA_MAP(GFX_EDC),
	FEA_MAP(BOOT_POWER_OPT),
	FEA_MAP(CLOCK_POWER_DOWN_BYPASS),
	FEA_MAP(DS_VCN),
	FEA_MAP(BACO_CG),
	FEA_MAP(MEM_TEMP_READ),
	FEA_MAP(ATHUB_MMHUB_PG),
	FEA_MAP(SOC_PCC),
	FEA_MAP(EDC_PWRBRK),
	FEA_MAP(SOC_EDC_XVMIN),
	FEA_MAP(GFX_PSM_DIDT),
	FEA_MAP(APT_ALL_ENABLE),
	FEA_MAP(APT_SQ_THROTTLE),
	FEA_MAP(APT_PF_DCS),
	FEA_MAP(GFX_EDC_XVMIN),
	FEA_MAP(GFX_DIDT_XVMIN),
	FEA_MAP(FAN_ABNORMAL),
	[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
	[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
	[SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
};

/* Generic SMU_TABLE_* -> ASIC TABLE_* id mapping for table transfers. */
static struct cmn2asic_mapping smu_v14_0_2_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PPTABLE),
	TAB_MAP(WATERMARKS),
	TAB_MAP(AVFS_PSM_DEBUG),
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(DRIVER_SMU_CONFIG),
	TAB_MAP(ACTIVITY_MONITOR_COEFF),
	[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
	TAB_MAP(I2C_COMMANDS),
	TAB_MAP(ECCINFO),
	TAB_MAP(OVERDRIVE),
};

/* Generic power-source -> ASIC power-source id mapping (AC vs DC). */
static struct cmn2asic_mapping smu_v14_0_2_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
	PWR_MAP(AC),
	PWR_MAP(DC),
};

/* PP power-profile -> PPLIB workload bit mapping (continues below). */
static struct cmn2asic_mapping smu_v14_0_2_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT,	WORKLOAD_PPLIB_DEFAULT_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,		WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,		WORKLOAD_PPLIB_POWER_SAVING_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,		WORKLOAD_PPLIB_VIDEO_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,			WORKLOAD_PPLIB_VR_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_COMPUTE_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,		WORKLOAD_PPLIB_CUSTOM_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D,		WORKLOAD_PPLIB_WINDOW_3D_BIT),
};

/* PMFW THROTTLER_*_BIT -> generic SMU_THROTTLER_*_BIT translation table. */
static const uint8_t smu_v14_0_2_throttler_map[] = {
	[THROTTLER_PPT0_BIT]		= (SMU_THROTTLER_PPT0_BIT),
	[THROTTLER_PPT1_BIT]		= (SMU_THROTTLER_PPT1_BIT),
	[THROTTLER_PPT2_BIT]		= (SMU_THROTTLER_PPT2_BIT),
	[THROTTLER_PPT3_BIT]		= (SMU_THROTTLER_PPT3_BIT),
	[THROTTLER_TDC_GFX_BIT]		= (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_TDC_SOC_BIT]		= (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_TEMP_EDGE_BIT]	= (SMU_THROTTLER_TEMP_EDGE_BIT),
	[THROTTLER_TEMP_HOTSPOT_BIT]	= (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
	[THROTTLER_TEMP_MEM_BIT]	= (SMU_THROTTLER_TEMP_MEM_BIT),
	[THROTTLER_TEMP_VR_GFX_BIT]	= (SMU_THROTTLER_TEMP_VR_GFX_BIT),
	[THROTTLER_TEMP_VR_SOC_BIT]	= (SMU_THROTTLER_TEMP_VR_SOC_BIT),
	[THROTTLER_TEMP_VR_MEM0_BIT]	= (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
	[THROTTLER_TEMP_VR_MEM1_BIT]	= (SMU_THROTTLER_TEMP_VR_MEM1_BIT),
	[THROTTLER_TEMP_LIQUID0_BIT]	= (SMU_THROTTLER_TEMP_LIQUID0_BIT),
	[THROTTLER_TEMP_LIQUID1_BIT]	= (SMU_THROTTLER_TEMP_LIQUID1_BIT),
	[THROTTLER_GFX_APCC_PLUS_BIT]	= (SMU_THROTTLER_APCC_BIT),
	[THROTTLER_FIT_BIT]		= (SMU_THROTTLER_FIT_BIT),
};

/*
 * Build the driver-allowed feature mask that is handed to PMFW.
 * @feature_mask: array of @num 32-bit words, filled in here
 * @num:          number of words; at most 2 (64 feature bits total)
 *
 * Returns 0 on success, -EINVAL when @num > 2.
 *
 * NOTE(review): every bit is first enabled via memset(0xff), which makes
 * the OR operations below redundant; all of the masking-out logic is
 * currently compiled out (#if 0), so the whole mask stays "allow all".
 */
static int
smu_v14_0_2_get_allowed_feature_mask(struct smu_context *smu,
				     uint32_t *feature_mask, uint32_t num)
{
	struct amdgpu_device *adev = smu->adev;
	/*u32 smu_version;*/

	if (num > 2)
		return -EINVAL;

	/* Allow everything by default. */
	memset(feature_mask, 0xff, sizeof(uint32_t) * num);

	if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) {
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT);
	}
#if 0
	if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) ||
	    !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);

	if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK))
		*(uint64_t *)feature_mask &=
			~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);

	/* PMFW 78.58 contains a critical fix for gfxoff feature */
	smu_cmn_get_smc_version(smu, NULL, &smu_version);
	if ((smu_version < 0x004e3a00) ||
	    !(adev->pm.pp_feature & PP_GFXOFF_MASK))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);

	if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) {
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
	}

	if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);

	if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT);
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT);
	}

	if (!(adev->pm.pp_feature & PP_ULV_MASK))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT);
#endif

	return 0;
}

/*
 * Consume the powerplay table: latch hardware-DC and BACO/MACO platform
 * caps, disable overdrive when the SKU advertises no OD features, record
 * the thermal controller type and fan presence.
 */
static int smu_v14_0_2_check_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_14_0_2_powerplay_table *powerplay_table =
		table_context->power_play_table;
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	const OverDriveLimits_t * const overdrive_upperlimits =
		&pptable->SkuTable.OverDriveLimitsBasicMax;
	const OverDriveLimits_t * const overdrive_lowerlimits =
		&pptable->SkuTable.OverDriveLimitsBasicMin;

	if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_HARDWAREDC)
		smu->dc_controlled_by_gpio = true;

	if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_BACO) {
		smu_baco->platform_support = true;

		/* MACO is only meaningful when BACO itself is supported. */
		if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_MACO)
			smu_baco->maco_support = true;
	}

	/* No usable OD features advertised in either limit table -> no OD. */
	if (!overdrive_lowerlimits->FeatureCtrlMask ||
	    !overdrive_upperlimits->FeatureCtrlMask)
		smu->od_enabled = false;

	table_context->thermal_controller_type =
		powerplay_table->thermal_controller_type;

	/*
	 * Instead of having its own buffer space and get overdrive_table copied,
	 * smu->od_settings just points to the actual overdrive_table
	 */
	smu->od_settings = &powerplay_table->overdrive_table;

	/* No fan if PMFW is not running the fan-control feature. */
	smu->adev->pm.no_fan =
		!(pptable->PFE_Settings.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT));

	return 0;
}

/* Copy the SMC pptable embedded in the powerplay table into driver_pptable. */
static int smu_v14_0_2_store_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_14_0_2_powerplay_table *powerplay_table =
		table_context->power_play_table;

	memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
	       sizeof(PPTable_t));

	return 0;
}

#ifndef atom_smc_dpm_info_table_14_0_0
/* Layout of the atomfirmware smc_dpm_info data table (header + board table). */
struct atom_smc_dpm_info_table_14_0_0 {
	struct atom_common_table_header table_header;
	BoardTable_t BoardTable;
};
#endif

/*
 * Replace the pptable's BoardTable with the board-specific one read from
 * the VBIOS smc_dpm_info data table.
 */
static int smu_v14_0_2_append_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	struct atom_smc_dpm_info_table_14_0_0 *smc_dpm_table;
	BoardTable_t *BoardTable = &smc_pptable->BoardTable;
	int index, ret;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smc_dpm_info);

	ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
					     (uint8_t **)&smc_dpm_table);
	if (ret)
		return ret;

	memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t));

	return 0;
}

#if 0
static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu,
					     void **table,
					     uint32_t *size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	void *combo_pptable = smu_table->combo_pptable;
	int ret = 0;

	ret = smu_cmn_get_combo_pptable(smu);
	if (ret)
		return ret;

	*table = combo_pptable;
	*size = sizeof(struct smu_14_0_powerplay_table);

	return 0;
}
#endif

/*
 * Fetch the combo pptable from PMFW and expose it as the powerplay table.
 * @table: out — points at the cached combo pptable on success
 * @size:  out — size of the smu_14_0_2 powerplay table structure
 */
static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu,
					     void **table,
					     uint32_t *size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	void *combo_pptable = smu_table->combo_pptable;
	int ret = 0;

	ret = smu_cmn_get_combo_pptable(smu);
	if (ret)
		return ret;

	*table = combo_pptable;
	*size = sizeof(struct smu_14_0_2_powerplay_table);

	return 0;
}

/*
 * Establish the powerplay table: from VBIOS when SCPM is disabled,
 * otherwise from PMFW; then store, append (board table) and validate it.
 * No-op under SRIOV VF.
 */
static int smu_v14_0_2_setup_pptable(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (amdgpu_sriov_vf(smu->adev))
		return 0;

	if (!adev->scpm_enabled)
		ret = smu_v14_0_setup_pptable(smu);
	else
		ret = smu_v14_0_2_get_pptable_from_pmfw(smu,
							&smu_table->power_play_table,
							&smu_table->power_play_table_size);
	if (ret)
		return ret;

	ret = smu_v14_0_2_store_powerplay_table(smu);
	if (ret)
		return ret;

	/*
	 * With SCPM enabled, the operation below will be handled
	 * by PSP. Driver involvment is unnecessary and useless.
	 */
	if (!adev->scpm_enabled) {
		ret = smu_v14_0_2_append_powerplay_table(smu);
		if (ret)
			return ret;
	}

	ret = smu_v14_0_2_check_powerplay_table(smu);
	if (ret)
		return ret;

	return ret;
}

/*
 * Declare the driver<->SMU shared tables (VRAM-backed, page aligned) and
 * allocate the host-side caches (metrics, gpu_metrics, watermarks, ecc).
 * Returns 0 on success, -ENOMEM with partial allocations unwound.
 */
static int smu_v14_0_2_tables_init(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU14_TOOL_SIZE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
		       sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
		       AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
	if (!smu_table->metrics_table)
		goto err0_out;
	smu_table->metrics_time = 0;

	/* NOTE(review): sized for gpu_metrics v1_3 — confirm this matches the
	 * gpu_metrics version filled in elsewhere in this file. */
	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err1_out;

	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
	if
 (!smu_table->watermarks_table)
		goto err2_out;

	smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
	if (!smu_table->ecc_table)
		goto err3_out;

	return 0;

	/* Unwind partial allocations in reverse order of creation. */
err3_out:
	kfree(smu_table->watermarks_table);
err2_out:
	kfree(smu_table->gpu_metrics_table);
err1_out:
	kfree(smu_table->metrics_table);
err0_out:
	return -ENOMEM;
}

/* Allocate the smu_14_0 DPM context hung off smu->smu_dpm. */
static int smu_v14_0_2_allocate_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_14_0_dpm_context),
				       GFP_KERNEL);
	if (!smu_dpm->dpm_context)
		return -ENOMEM;

	smu_dpm->dpm_context_size = sizeof(struct smu_14_0_dpm_context);

	return 0;
}

/* ASIC table/DPM-context setup, then the common v14.0 SMC table init. */
static int smu_v14_0_2_init_smc_tables(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v14_0_2_tables_init(smu);
	if (ret)
		return ret;

	ret = smu_v14_0_2_allocate_dpm_context(smu);
	if (ret)
		return ret;

	return smu_v14_0_init_smc_tables(smu);
}

/*
 * Populate the default DPM tables for every clock domain plus the PCIe
 * link table. For each domain: query the levels from PMFW when the
 * corresponding DPM feature is enabled, otherwise fall back to a single
 * level at the boot clock (boot values divided by 100 — presumably a
 * 10 kHz -> MHz conversion; confirm units against smu_v14_0 boot values).
 */
static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;
	SkuTable_t *skutable = &pptable->SkuTable;
	struct smu_14_0_dpm_table *dpm_table;
	struct smu_14_0_pcie_table *pcie_table;
	uint32_t link_level;
	int ret = 0;

	/* socclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.soc_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v14_0_set_single_dpm_table(smu,
						     SMU_SOCCLK,
						     dpm_table);
		if (ret)
			return ret;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* gfxclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.gfx_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
		ret = smu_v14_0_set_single_dpm_table(smu,
						     SMU_GFXCLK,
						     dpm_table);
		if (ret)
			return ret;

		/*
		 * Update the reported maximum shader clock to the value
		 * which can be guarded to be achieved on all cards. This
		 * is aligned with Window setting. And considering that value
		 * might be not the peak frequency the card can achieve, it
		 * is normal some real-time clock frequency can overtake this
		 * labelled maximum clock frequency(for example in pp_dpm_sclk
		 * sysfs output).
		 */
		if (skutable->DriverReportedClocks.GameClockAc &&
		    (dpm_table->dpm_levels[dpm_table->count - 1].value >
		     skutable->DriverReportedClocks.GameClockAc)) {
			dpm_table->dpm_levels[dpm_table->count - 1].value =
				skutable->DriverReportedClocks.GameClockAc;
			dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
		}
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* uclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.uclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v14_0_set_single_dpm_table(smu,
						     SMU_UCLK,
						     dpm_table);
		if (ret)
			return ret;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* fclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.fclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
		ret = smu_v14_0_set_single_dpm_table(smu,
						     SMU_FCLK,
						     dpm_table);
		if (ret)
			return ret;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* vclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.vclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) {
		ret = smu_v14_0_set_single_dpm_table(smu,
						     SMU_VCLK,
						     dpm_table);
		if (ret)
			return ret;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* dclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.dclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) {
		ret = smu_v14_0_set_single_dpm_table(smu,
						     SMU_DCLK,
						     dpm_table);
		if (ret)
			return ret;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* lclk dpm table setup */
	pcie_table = &dpm_context->dpm_tables.pcie_table;
	pcie_table->num_of_link_levels = 0;
	for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
		/* Skip all-zero (unpopulated) link levels. */
		if (!skutable->PcieGenSpeed[link_level] &&
		    !skutable->PcieLaneCount[link_level] &&
		    !skutable->LclkFreq[link_level])
			continue;

		pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
			skutable->PcieGenSpeed[link_level];
		pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
			skutable->PcieLaneCount[link_level];
		pcie_table->clk_freq[pcie_table->num_of_link_levels] =
			skutable->LclkFreq[link_level];
		pcie_table->num_of_link_levels++;

		/* NOTE(review): when level 0 is consumed, source level 1 is
		 * skipped — looks intentional (level dedup?); confirm. */
		if (link_level == 0)
			link_level++;
	}

	/* dcefclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.dcef_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
		ret = smu_v14_0_set_single_dpm_table(smu,
						     SMU_DCEFCLK,
						     dpm_table);
		if (ret)
			return ret;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	return 0;
}

/* True when PMFW reports any SMC_DPM_FEATURE bit enabled; false on error. */
static bool smu_v14_0_2_is_dpm_running(struct smu_context *smu)
{
	int ret = 0;
	uint64_t feature_enabled;

	ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
	if (ret)
		return false;

	return !!(feature_enabled & SMC_DPM_FEATURE);
}

/* Log a terse summary of the driver pptable's PFE settings. */
static void smu_v14_0_2_dump_pptable(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;
	PFE_Settings_t *PFEsettings = &pptable->PFE_Settings;

	dev_info(smu->adev->dev, "Dumped PPTable:\n");

	dev_info(smu->adev->dev, "Version = 0x%08x\n", PFEsettings->Version);
	dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", PFEsettings->FeaturesToRun[0]);
	dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", PFEsettings->FeaturesToRun[1]);
}

/* Collapse per-throttler ThrottlingPercentage[] into an active-bit mask. */
static uint32_t smu_v14_0_2_get_throttler_status(SmuMetrics_t *metrics)
{
	uint32_t throttler_status = 0;
	int i;

	for (i = 0; i < THROTTLER_COUNT; i++)
		throttler_status |=
			(metrics->ThrottlingPercentage[i] ?
			 1U << i : 0);

	return throttler_status;
}

/* Activity (%) at or below which the post-deep-sleep frequency is reported. */
#define SMU_14_0_2_BUSY_THRESHOLD	5
/*
 * Fetch a single metrics value from the (cached) PMFW metrics table.
 * @member: which metric to read
 * @value:  out — the raw metric; temperatures are scaled by
 *          SMU_TEMPERATURE_UNITS_PER_CENTIGRADES, socket power is shifted
 *          left by 8 (presumably Q8 fixed point for callers — confirm),
 *          unknown members yield UINT_MAX.
 * Returns 0 on success or the error from the metrics-table refresh.
 */
static int smu_v14_0_2_get_smu_metrics_data(struct smu_context *smu,
					    MetricsMember_t member,
					    uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics =
		&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK_0];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK_0];
		break;
	case METRICS_CURR_FCLK:
		*value = metrics->CurrClock[PPCLK_FCLK];
		break;
	case METRICS_CURR_DCEFCLK:
		*value = metrics->CurrClock[PPCLK_DCFCLK];
		break;
	case METRICS_AVERAGE_GFXCLK:
		/* When near-idle, report the post-deep-sleep frequency. */
		if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD)
			*value = metrics->AverageGfxclkFrequencyPostDs;
		else
			*value = metrics->AverageGfxclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_FCLK:
		/* Gated on UCLK activity, mirroring the UCLK handling below. */
		if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
			*value = metrics->AverageFclkFrequencyPostDs;
		else
			*value = metrics->AverageFclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_UCLK:
		if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
			*value = metrics->AverageMemclkFrequencyPostDs;
		else
			*value = metrics->AverageMemclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->AverageVclk0Frequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->AverageDclk0Frequency;
		break;
	case METRICS_AVERAGE_VCLK1:
		*value = metrics->AverageVclk1Frequency;
		break;
	case METRICS_AVERAGE_DCLK1:
		*value = metrics->AverageDclk1Frequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		*value = metrics->AverageSocketPower << 8;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->AvgTemperature[TEMP_EDGE] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->AvgTemperature[TEMP_HOTSPOT] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->AvgTemperature[TEMP_MEM] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->AvgTemperature[TEMP_VR_GFX] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->AvgTemperature[TEMP_VR_SOC] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = smu_v14_0_2_get_throttler_status(metrics);
		break;
	case METRICS_CURR_FANSPEED:
		*value = metrics->AvgFanRpm;
		break;
	case METRICS_CURR_FANPWM:
		*value = metrics->AvgFanPwm;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		*value = metrics->AvgVoltage[SVI_PLANE_VDD_GFX];
		break;
	case METRICS_PCIE_RATE:
		*value = metrics->PcieRate;
		break;
	case METRICS_PCIE_WIDTH:
		*value = metrics->PcieWidth;
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}

/*
 * Return the [min, max] of the DPM table for @clk_type.
 * Either @min or @max may be NULL when not wanted.
 * Returns -EINVAL for clock types with no DPM table here.
 */
static int smu_v14_0_2_get_dpm_ultimate_freq(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     uint32_t *min,
					     uint32_t *max)
{
	struct smu_14_0_dpm_context *dpm_context =
		smu->smu_dpm.dpm_context;
	struct smu_14_0_dpm_table *dpm_table;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		/* uclk dpm table */
		dpm_table = &dpm_context->dpm_tables.uclk_table;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		/* gfxclk dpm table */
		dpm_table = &dpm_context->dpm_tables.gfx_table;
		break;
	case SMU_SOCCLK:
		/* socclk dpm table */
		dpm_table = &dpm_context->dpm_tables.soc_table;
		break;
	case SMU_FCLK:
		/* fclk dpm table */
		dpm_table = &dpm_context->dpm_tables.fclk_table;
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		/* vclk dpm table */
		dpm_table = &dpm_context->dpm_tables.vclk_table;
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		/* dclk dpm table */
		dpm_table = &dpm_context->dpm_tables.dclk_table;
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported clock type!\n");
		return -EINVAL;
	}

	if (min)
		*min = dpm_table->min;
	if (max)
		*max = dpm_table->max;

	return 0;
}

/*
 * amd_pp_sensors read-back. Clocks are converted to 10 kHz units (*100);
 * *size is reported as 4 bytes for every supported sensor (including
 * MAX_FAN_RPM, which only writes 16 bits — NOTE(review): confirm callers
 * treat the buffer as 32-bit). Returns -EOPNOTSUPP for unknown sensors.
 */
static int smu_v14_0_2_read_sensor(struct smu_context *smu,
				   enum amd_pp_sensors sensor,
				   void *data,
				   uint32_t *size)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	int ret = 0;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		*(uint16_t *)data = smc_pptable->CustomSkuTable.FanMaximumRpm;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_MEMACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_SOCKETPOWER,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_HOTSPOT,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_EDGE,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_MEM,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_CURR_UCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_VOLTAGE_VDDGFX,
						       (uint32_t *)data);
		*size = 4;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/*
 * Read the current frequency for @clk_type: map it to the ASIC clock id,
 * then pick the matching metrics member (average for GFX/VCLK/DCLK,
 * instantaneous for the rest) and fetch it.
 */
static int smu_v14_0_2_get_current_clk_freq_by_table(struct smu_context *smu,
						     enum smu_clk_type clk_type,
						     uint32_t *value)
{
	MetricsMember_t member_type;
	int clk_id = 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return -EINVAL;

	switch (clk_id) {
	case PPCLK_GFXCLK:
		member_type = METRICS_AVERAGE_GFXCLK;
		break;
	case PPCLK_UCLK:
		member_type = METRICS_CURR_UCLK;
		break;
	case PPCLK_FCLK:
		member_type = METRICS_CURR_FCLK;
		break;
	case PPCLK_SOCCLK:
		member_type = METRICS_CURR_SOCCLK;
		break;
	case PPCLK_VCLK_0:
		member_type = METRICS_AVERAGE_VCLK;
		break;
	case PPCLK_DCLK_0:
		member_type = METRICS_AVERAGE_DCLK;
		break;
	case PPCLK_DCFCLK:
		member_type = METRICS_CURR_DCEFCLK;
		break;
	default:
		return -EINVAL;
	}

	return
		smu_v14_0_2_get_smu_metrics_data(smu,
						 member_type,
						 value);
}

/*
 * smu_v14_0_2_is_od_feature_supported - check whether the overdrive
 * knob @od_feature_bit is advertised in the pptable's basic-max
 * overdrive limits FeatureCtrlMask.
 */
static bool smu_v14_0_2_is_od_feature_supported(struct smu_context *smu,
						int od_feature_bit)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	const OverDriveLimits_t * const overdrive_upperlimits =
		&pptable->SkuTable.OverDriveLimitsBasicMax;

	return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
}

/*
 * smu_v14_0_2_get_od_setting_limits - fetch the [min, max] tuning
 * range for one PP_OD_FEATURE_* knob from the pptable's basic-min /
 * basic-max overdrive limit tables.
 *
 * @min/@max may each be NULL; an unknown knob yields INT_MAX for both
 * bounds.
 */
static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu,
					      int od_feature_bit,
					      int32_t *min,
					      int32_t *max)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	const OverDriveLimits_t * const overdrive_upperlimits =
		&pptable->SkuTable.OverDriveLimitsBasicMax;
	const OverDriveLimits_t * const overdrive_lowerlimits =
		&pptable->SkuTable.OverDriveLimitsBasicMin;
	int32_t od_min_setting, od_max_setting;

	switch (od_feature_bit) {
	case PP_OD_FEATURE_GFXCLK_FMIN:
		od_min_setting = overdrive_lowerlimits->GfxclkFmin;
		od_max_setting = overdrive_upperlimits->GfxclkFmin;
		break;
	case PP_OD_FEATURE_GFXCLK_FMAX:
		od_min_setting = overdrive_lowerlimits->GfxclkFmax;
		od_max_setting = overdrive_upperlimits->GfxclkFmax;
		break;
	case PP_OD_FEATURE_UCLK_FMIN:
		od_min_setting = overdrive_lowerlimits->UclkFmin;
		od_max_setting = overdrive_upperlimits->UclkFmin;
		break;
	case PP_OD_FEATURE_UCLK_FMAX:
		od_min_setting = overdrive_lowerlimits->UclkFmax;
		od_max_setting = overdrive_upperlimits->UclkFmax;
		break;
	case PP_OD_FEATURE_GFX_VF_CURVE:
		/* all curve points share the limits of point 0 */
		od_min_setting = overdrive_lowerlimits->VoltageOffsetPerZoneBoundary[0];
		od_max_setting = overdrive_upperlimits->VoltageOffsetPerZoneBoundary[0];
		break;
	case PP_OD_FEATURE_FAN_CURVE_TEMP:
		od_min_setting = overdrive_lowerlimits->FanLinearTempPoints[0];
		od_max_setting = overdrive_upperlimits->FanLinearTempPoints[0];
		break;
	case PP_OD_FEATURE_FAN_CURVE_PWM:
		od_min_setting = overdrive_lowerlimits->FanLinearPwmPoints[0];
		od_max_setting = overdrive_upperlimits->FanLinearPwmPoints[0];
		break;
	case PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT:
		od_min_setting = overdrive_lowerlimits->AcousticLimitRpmThreshold;
		od_max_setting = overdrive_upperlimits->AcousticLimitRpmThreshold;
		break;
	case PP_OD_FEATURE_FAN_ACOUSTIC_TARGET:
		od_min_setting = overdrive_lowerlimits->AcousticTargetRpmThreshold;
		od_max_setting = overdrive_upperlimits->AcousticTargetRpmThreshold;
		break;
	case PP_OD_FEATURE_FAN_TARGET_TEMPERATURE:
		od_min_setting = overdrive_lowerlimits->FanTargetTemperature;
		od_max_setting = overdrive_upperlimits->FanTargetTemperature;
		break;
	case PP_OD_FEATURE_FAN_MINIMUM_PWM:
		od_min_setting = overdrive_lowerlimits->FanMinimumPwm;
		od_max_setting = overdrive_upperlimits->FanMinimumPwm;
		break;
	default:
		od_min_setting = od_max_setting = INT_MAX;
		break;
	}

	if (min)
		*min = od_min_setting;
	if (max)
		*max = od_max_setting;
}

/*
 * smu_v14_0_2_print_clk_levels - emit the sysfs listing for a clock's
 * DPM levels, the PCIe link levels, or the OD (overdrive) settings
 * and their tunable ranges.
 *
 * Returns the number of bytes written into @buf, or a negative errno.
 */
static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					char *buf)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	OverDriveTableExternal_t *od_table =
		(OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
	struct smu_14_0_dpm_table *single_dpm_table;
	struct smu_14_0_pcie_table *pcie_table;
	uint32_t gen_speed, lane_width;
	/* NOTE(review): curr_freq is int but is passed as uint32_t * to
	 * the current-freq getter below — confirm this signedness mix is
	 * intentional.
	 */
	int i, curr_freq, size = 0;
	int32_t min_value, max_value;
	int ret = 0;

	smu_cmn_get_sysfs_buf(&buf, &size);

	/* after a fatal RAS interrupt the clock state is meaningless */
	if (amdgpu_ras_intr_triggered()) {
		size += sysfs_emit_at(buf, size, "unavailable\n");
		return size;
	}

	/* first pass: pick the DPM table backing this clock type */
	switch (clk_type) {
	case SMU_SCLK:
		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		break;
	case SMU_MCLK:
		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		break;
	case SMU_SOCCLK:
		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		break;
	case SMU_FCLK:
		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		break;
	case SMU_DCEFCLK:
		single_dpm_table = &(dpm_context->dpm_tables.dcef_table);
		break;
	default:
		break;
	}

	/* second pass: render the output for the selected clock/OD item */
	switch (clk_type) {
	case SMU_SCLK:
	case SMU_MCLK:
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_VCLK1:
	case SMU_DCLK:
	case SMU_DCLK1:
	case SMU_DCEFCLK:
		ret = smu_v14_0_2_get_current_clk_freq_by_table(smu, clk_type, &curr_freq);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to get current clock freq!");
			return ret;
		}

		if (single_dpm_table->is_fine_grained) {
			/*
			 * For fine grained dpms, there are only two dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> max clock freq
			 * And the current clock frequency can be any value between them.
			 * So, if the current clock frequency is not at level 0 or level 1,
			 * we will fake it as three dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> current actual clock freq
			 *   - level 2 -> max clock freq
			 */
			if ((single_dpm_table->dpm_levels[0].value != curr_freq) &&
			    (single_dpm_table->dpm_levels[1].value != curr_freq)) {
				size += sysfs_emit_at(buf, size, "0: %uMhz\n",
						      single_dpm_table->dpm_levels[0].value);
				size += sysfs_emit_at(buf, size, "1: %uMhz *\n",
						      curr_freq);
				size += sysfs_emit_at(buf, size, "2: %uMhz\n",
						      single_dpm_table->dpm_levels[1].value);
			} else {
				size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
						      single_dpm_table->dpm_levels[0].value,
						      single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : "");
				size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
						      single_dpm_table->dpm_levels[1].value,
						      single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : "");
			}
		} else {
			/* discrete DPM: list every level, star the current one */
			for (i = 0; i < single_dpm_table->count; i++)
				size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
						      i, single_dpm_table->dpm_levels[i].value,
						      single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : "");
		}
		break;
	case SMU_PCIE:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_PCIE_RATE,
						       &gen_speed);
		if (ret)
			return ret;

		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_PCIE_WIDTH,
						       &lane_width);
		if (ret)
			return ret;

		pcie_table = &(dpm_context->dpm_tables.pcie_table);
		for (i = 0; i < pcie_table->num_of_link_levels; i++)
			size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
					(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," :
					(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," :
					(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," :
					(pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," : "",
					(pcie_table->pcie_lane[i] == 1) ? "x1" :
					(pcie_table->pcie_lane[i] == 2) ? "x2" :
					(pcie_table->pcie_lane[i] == 3) ? "x4" :
					(pcie_table->pcie_lane[i] == 4) ? "x8" :
					(pcie_table->pcie_lane[i] == 5) ? "x12" :
					(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
					pcie_table->clk_freq[i],
					(gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
					(lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
					"*" : "");
		break;

	case SMU_OD_SCLK:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_GFXCLK_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
		size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
					od_table->OverDriveTable.GfxclkFmin,
					od_table->OverDriveTable.GfxclkFmax);
		break;

	case SMU_OD_MCLK:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_UCLK_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_MCLK:\n");
		size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMHz\n",
					od_table->OverDriveTable.UclkFmin,
					od_table->OverDriveTable.UclkFmax);
		break;

	case SMU_OD_VDDGFX_OFFSET:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_GFX_VF_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n");
		size += sysfs_emit_at(buf, size, "%dmV\n",
				      od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[0]);
		break;

	case SMU_OD_FAN_CURVE:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_FAN_CURVE:\n");
		/* last point is excluded from the listing */
		for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++)
			size += sysfs_emit_at(buf, size, "%d: %dC %d%%\n",
						i,
						(int)od_table->OverDriveTable.FanLinearTempPoints[i],
						(int)od_table->OverDriveTable.FanLinearPwmPoints[i]);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_CURVE_TEMP,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "FAN_CURVE(hotspot temp): %uC %uC\n",
				      min_value, max_value);

		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_CURVE_PWM,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "FAN_CURVE(fan speed): %u%% %u%%\n",
				      min_value, max_value);

		break;

	case SMU_OD_ACOUSTIC_LIMIT:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_LIMIT:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
					(int)od_table->OverDriveTable.AcousticLimitRpmThreshold);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "ACOUSTIC_LIMIT: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_ACOUSTIC_TARGET:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_TARGET:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
					(int)od_table->OverDriveTable.AcousticTargetRpmThreshold);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "ACOUSTIC_TARGET: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_FAN_TARGET_TEMPERATURE:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "FAN_TARGET_TEMPERATURE:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
					(int)od_table->OverDriveTable.FanTargetTemperature);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "TARGET_TEMPERATURE: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_FAN_MINIMUM_PWM:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "FAN_MINIMUM_PWM:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
					(int)od_table->OverDriveTable.FanMinimumPwm);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_MINIMUM_PWM,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "MINIMUM_PWM: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_RANGE:
		if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) &&
		    !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) &&
		    !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");

		if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
			smu_v14_0_2_get_od_setting_limits(smu,
							  PP_OD_FEATURE_GFXCLK_FMIN,
							  &min_value,
							  NULL);
			smu_v14_0_2_get_od_setting_limits(smu,
							  PP_OD_FEATURE_GFXCLK_FMAX,
							  NULL,
							  &max_value);
			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
					      min_value, max_value);
		}

		if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
			smu_v14_0_2_get_od_setting_limits(smu,
							  PP_OD_FEATURE_UCLK_FMIN,
							  &min_value,
							  NULL);
			smu_v14_0_2_get_od_setting_limits(smu,
							  PP_OD_FEATURE_UCLK_FMAX,
							  NULL,
							  &max_value);
			size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
					      min_value, max_value);
		}

		if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
			smu_v14_0_2_get_od_setting_limits(smu,
							  PP_OD_FEATURE_GFX_VF_CURVE,
							  &min_value,
							  &max_value);
			size += sysfs_emit_at(buf, size, "VDDGFX_OFFSET: %7dmv %10dmv\n",
					      min_value, max_value);
		}
		break;

	default:
		break;
	}

	return size;
}

/*
 * smu_v14_0_2_force_clk_levels - restrict @clk_type to the DPM levels
 * selected in @mask (bit n set == level n allowed) by programming a
 * soft min/max frequency range.
 */
static int smu_v14_0_2_force_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					uint32_t mask)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_14_0_dpm_table *single_dpm_table;
	uint32_t soft_min_level, soft_max_level;
	uint32_t min_freq, max_freq;
	int ret = 0;

	/* lowest/highest set bits in the mask define the level range */
	soft_min_level = mask ? (ffs(mask) - 1) : 0;
	soft_max_level = mask ? (fls(mask) - 1) : 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		break;
	case SMU_MCLK:
	case SMU_UCLK:
		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		break;
	case SMU_SOCCLK:
		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		break;
	case SMU_FCLK:
		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		break;
	default:
		break;
	}

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
	case SMU_MCLK:
	case SMU_UCLK:
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_VCLK1:
	case SMU_DCLK:
	case SMU_DCLK1:
		if (single_dpm_table->is_fine_grained) {
			/* There is only 2 levels for fine grained DPM */
			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
			soft_min_level = (soft_min_level >= 1 ?
1 : 0); 1507 } else { 1508 if ((soft_max_level >= single_dpm_table->count) || 1509 (soft_min_level >= single_dpm_table->count)) 1510 return -EINVAL; 1511 } 1512 1513 min_freq = single_dpm_table->dpm_levels[soft_min_level].value; 1514 max_freq = single_dpm_table->dpm_levels[soft_max_level].value; 1515 1516 ret = smu_v14_0_set_soft_freq_limited_range(smu, 1517 clk_type, 1518 min_freq, 1519 max_freq); 1520 break; 1521 case SMU_DCEFCLK: 1522 case SMU_PCIE: 1523 default: 1524 break; 1525 } 1526 1527 return ret; 1528 } 1529 1530 static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu, 1531 uint8_t pcie_gen_cap, 1532 uint8_t pcie_width_cap) 1533 { 1534 struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; 1535 struct smu_14_0_pcie_table *pcie_table = 1536 &dpm_context->dpm_tables.pcie_table; 1537 uint32_t smu_pcie_arg; 1538 int ret, i; 1539 1540 for (i = 0; i < pcie_table->num_of_link_levels; i++) { 1541 if (pcie_table->pcie_gen[i] > pcie_gen_cap) 1542 pcie_table->pcie_gen[i] = pcie_gen_cap; 1543 if (pcie_table->pcie_lane[i] > pcie_width_cap) 1544 pcie_table->pcie_lane[i] = pcie_width_cap; 1545 1546 smu_pcie_arg = i << 16; 1547 smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; 1548 smu_pcie_arg |= pcie_table->pcie_lane[i]; 1549 1550 ret = smu_cmn_send_smc_msg_with_param(smu, 1551 SMU_MSG_OverridePcieParameters, 1552 smu_pcie_arg, 1553 NULL); 1554 if (ret) 1555 return ret; 1556 } 1557 1558 return 0; 1559 } 1560 1561 static const struct smu_temperature_range smu14_thermal_policy[] = { 1562 {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, 1563 { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, 1564 }; 1565 1566 static int smu_v14_0_2_get_thermal_temperature_range(struct smu_context *smu, 1567 struct smu_temperature_range *range) 1568 { 1569 struct smu_table_context *table_context = &smu->smu_table; 1570 struct smu_14_0_2_powerplay_table *powerplay_table = 1571 table_context->power_play_table; 1572 
PPTable_t *pptable = smu->smu_table.driver_pptable; 1573 1574 if (amdgpu_sriov_vf(smu->adev)) 1575 return 0; 1576 1577 if (!range) 1578 return -EINVAL; 1579 1580 memcpy(range, &smu14_thermal_policy[0], sizeof(struct smu_temperature_range)); 1581 1582 range->max = pptable->CustomSkuTable.TemperatureLimit[TEMP_EDGE] * 1583 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1584 range->edge_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE) * 1585 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1586 range->hotspot_crit_max = pptable->CustomSkuTable.TemperatureLimit[TEMP_HOTSPOT] * 1587 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1588 range->hotspot_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT) * 1589 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1590 range->mem_crit_max = pptable->CustomSkuTable.TemperatureLimit[TEMP_MEM] * 1591 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1592 range->mem_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)* 1593 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1594 range->software_shutdown_temp = powerplay_table->software_shutdown_temp; 1595 range->software_shutdown_temp_offset = pptable->CustomSkuTable.FanAbnormalTempLimitOffset; 1596 1597 return 0; 1598 } 1599 1600 static int smu_v14_0_2_populate_umd_state_clk(struct smu_context *smu) 1601 { 1602 struct smu_14_0_dpm_context *dpm_context = 1603 smu->smu_dpm.dpm_context; 1604 struct smu_14_0_dpm_table *gfx_table = 1605 &dpm_context->dpm_tables.gfx_table; 1606 struct smu_14_0_dpm_table *mem_table = 1607 &dpm_context->dpm_tables.uclk_table; 1608 struct smu_14_0_dpm_table *soc_table = 1609 &dpm_context->dpm_tables.soc_table; 1610 struct smu_14_0_dpm_table *vclk_table = 1611 &dpm_context->dpm_tables.vclk_table; 1612 struct smu_14_0_dpm_table *dclk_table = 1613 &dpm_context->dpm_tables.dclk_table; 1614 struct smu_14_0_dpm_table *fclk_table = 1615 &dpm_context->dpm_tables.fclk_table; 1616 struct smu_umd_pstate_table 
		*pstate_table =
		&smu->pstate_table;
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;
	DriverReportedClocks_t driver_clocks =
		pptable->SkuTable.DriverReportedClocks;

	pstate_table->gfxclk_pstate.min = gfx_table->min;
	/* peak gfxclk: prefer the AC game clock when it caps the table max */
	if (driver_clocks.GameClockAc &&
	    (driver_clocks.GameClockAc < gfx_table->max))
		pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
	else
		pstate_table->gfxclk_pstate.peak = gfx_table->max;

	pstate_table->uclk_pstate.min = mem_table->min;
	pstate_table->uclk_pstate.peak = mem_table->max;

	pstate_table->socclk_pstate.min = soc_table->min;
	pstate_table->socclk_pstate.peak = soc_table->max;

	pstate_table->vclk_pstate.min = vclk_table->min;
	pstate_table->vclk_pstate.peak = vclk_table->max;

	pstate_table->dclk_pstate.min = dclk_table->min;
	pstate_table->dclk_pstate.peak = dclk_table->max;

	pstate_table->fclk_pstate.min = fclk_table->min;
	pstate_table->fclk_pstate.peak = fclk_table->max;

	/* standard gfxclk: prefer the AC base clock when it caps the table max */
	if (driver_clocks.BaseClockAc &&
	    driver_clocks.BaseClockAc < gfx_table->max)
		pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
	else
		pstate_table->gfxclk_pstate.standard = gfx_table->max;
	pstate_table->uclk_pstate.standard = mem_table->max;
	pstate_table->socclk_pstate.standard = soc_table->min;
	pstate_table->vclk_pstate.standard = vclk_table->min;
	pstate_table->dclk_pstate.standard = dclk_table->min;
	pstate_table->fclk_pstate.standard = fclk_table->min;

	return 0;
}

/*
 * smu_v14_0_2_get_unique_id - derive the device unique id from the
 * public serial number reported in the SMU metrics table.  If the
 * metrics read fails, unique_id is set to 0 (upper32/lower32 keep
 * their zero initializers).
 */
static void smu_v14_0_2_get_unique_id(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics =
		&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
	struct amdgpu_device *adev = smu->adev;
	uint32_t upper32 = 0, lower32 = 0;
	int ret;

	ret = smu_cmn_get_metrics_table(smu, NULL, false);
	if (ret)
		goto out;

	upper32 = metrics->PublicSerialNumberUpper;
	lower32 = metrics->PublicSerialNumberLower;

out:
	adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
}

/*
 * smu_v14_0_2_get_power_limit - report the current/default/max/min
 * socket power limits.  The current limit is read from the PMFW and,
 * failing that, falls back to the pptable AC/DC PPT0 limit depending
 * on the present power source.  The max limit is the PPT0 AC message
 * limit from the SkuTable; the min limit is 0.
 */
static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
				       uint32_t *current_power_limit,
				       uint32_t *default_power_limit,
				       uint32_t *max_power_limit,
				       uint32_t *min_power_limit)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;
	CustomSkuTable_t *skutable = &pptable->CustomSkuTable;
	uint32_t power_limit;
	uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];

	if (smu_v14_0_get_current_power_limit(smu, &power_limit))
		power_limit = smu->adev->pm.ac_power ?
			      skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0] :
			      skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0];

	if (current_power_limit)
		*current_power_limit = power_limit;
	if (default_power_limit)
		*default_power_limit = power_limit;

	if (max_power_limit)
		*max_power_limit = msg_limit;

	if (min_power_limit)
		*min_power_limit = 0;

	return 0;
}

/*
 * smu_v14_0_2_get_power_profile_mode - emit the sysfs table listing
 * every power profile with its GFXCLK/FCLK activity-monitor
 * coefficients; the currently selected profile is starred.
 * Returns bytes written, or a negative errno.
 */
static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu,
					      char *buf)
{
	DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
	DpmActivityMonitorCoeffInt_t *activity_monitor =
		&(activity_monitor_external.DpmActivityMonitorCoeffInt);
	static const char *title[] = {
			"PROFILE_INDEX(NAME)",
			"CLOCK_TYPE(NAME)",
			"FPS",
			"MinActiveFreqType",
			"MinActiveFreq",
			"BoosterFreqType",
			"BoosterFreq",
			"PD_Data_limit_c",
			"PD_Data_error_coeff",
			"PD_Data_error_rate_coeff"};
	int16_t workload_type = 0;
	uint32_t i, size = 0;
	int result = 0;

	if (!buf)
		return
-EINVAL; 1733 1734 size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s\n", 1735 title[0], title[1], title[2], title[3], title[4], title[5], 1736 title[6], title[7], title[8], title[9]); 1737 1738 for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) { 1739 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 1740 workload_type = smu_cmn_to_asic_specific_index(smu, 1741 CMN2ASIC_MAPPING_WORKLOAD, 1742 i); 1743 if (workload_type == -ENOTSUPP) 1744 continue; 1745 else if (workload_type < 0) 1746 return -EINVAL; 1747 1748 result = smu_cmn_update_table(smu, 1749 SMU_TABLE_ACTIVITY_MONITOR_COEFF, 1750 workload_type, 1751 (void *)(&activity_monitor_external), 1752 false); 1753 if (result) { 1754 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); 1755 return result; 1756 } 1757 1758 size += sysfs_emit_at(buf, size, "%2d %14s%s:\n", 1759 i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); 1760 1761 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n", 1762 " ", 1763 0, 1764 "GFXCLK", 1765 activity_monitor->Gfx_FPS, 1766 activity_monitor->Gfx_MinActiveFreqType, 1767 activity_monitor->Gfx_MinActiveFreq, 1768 activity_monitor->Gfx_BoosterFreqType, 1769 activity_monitor->Gfx_BoosterFreq, 1770 activity_monitor->Gfx_PD_Data_limit_c, 1771 activity_monitor->Gfx_PD_Data_error_coeff, 1772 activity_monitor->Gfx_PD_Data_error_rate_coeff); 1773 1774 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n", 1775 " ", 1776 1, 1777 "FCLK", 1778 activity_monitor->Fclk_FPS, 1779 activity_monitor->Fclk_MinActiveFreqType, 1780 activity_monitor->Fclk_MinActiveFreq, 1781 activity_monitor->Fclk_BoosterFreqType, 1782 activity_monitor->Fclk_BoosterFreq, 1783 activity_monitor->Fclk_PD_Data_limit_c, 1784 activity_monitor->Fclk_PD_Data_error_coeff, 1785 activity_monitor->Fclk_PD_Data_error_rate_coeff); 1786 } 1787 1788 return size; 1789 } 1790 1791 static int 
smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, 1792 long *input, 1793 uint32_t size) 1794 { 1795 DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; 1796 DpmActivityMonitorCoeffInt_t *activity_monitor = 1797 &(activity_monitor_external.DpmActivityMonitorCoeffInt); 1798 int workload_type, ret = 0; 1799 1800 smu->power_profile_mode = input[size]; 1801 1802 if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { 1803 dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); 1804 return -EINVAL; 1805 } 1806 1807 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { 1808 if (size != 9) 1809 return -EINVAL; 1810 1811 ret = smu_cmn_update_table(smu, 1812 SMU_TABLE_ACTIVITY_MONITOR_COEFF, 1813 WORKLOAD_PPLIB_CUSTOM_BIT, 1814 (void *)(&activity_monitor_external), 1815 false); 1816 if (ret) { 1817 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); 1818 return ret; 1819 } 1820 1821 switch (input[0]) { 1822 case 0: /* Gfxclk */ 1823 activity_monitor->Gfx_FPS = input[1]; 1824 activity_monitor->Gfx_MinActiveFreqType = input[2]; 1825 activity_monitor->Gfx_MinActiveFreq = input[3]; 1826 activity_monitor->Gfx_BoosterFreqType = input[4]; 1827 activity_monitor->Gfx_BoosterFreq = input[5]; 1828 activity_monitor->Gfx_PD_Data_limit_c = input[6]; 1829 activity_monitor->Gfx_PD_Data_error_coeff = input[7]; 1830 activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8]; 1831 break; 1832 case 1: /* Fclk */ 1833 activity_monitor->Fclk_FPS = input[1]; 1834 activity_monitor->Fclk_MinActiveFreqType = input[2]; 1835 activity_monitor->Fclk_MinActiveFreq = input[3]; 1836 activity_monitor->Fclk_BoosterFreqType = input[4]; 1837 activity_monitor->Fclk_BoosterFreq = input[5]; 1838 activity_monitor->Fclk_PD_Data_limit_c = input[6]; 1839 activity_monitor->Fclk_PD_Data_error_coeff = input[7]; 1840 activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8]; 1841 break; 1842 default: 1843 return -EINVAL; 1844 } 1845 
1846 ret = smu_cmn_update_table(smu, 1847 SMU_TABLE_ACTIVITY_MONITOR_COEFF, 1848 WORKLOAD_PPLIB_CUSTOM_BIT, 1849 (void *)(&activity_monitor_external), 1850 true); 1851 if (ret) { 1852 dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); 1853 return ret; 1854 } 1855 } 1856 1857 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 1858 workload_type = smu_cmn_to_asic_specific_index(smu, 1859 CMN2ASIC_MAPPING_WORKLOAD, 1860 smu->power_profile_mode); 1861 if (workload_type < 0) 1862 return -EINVAL; 1863 1864 return smu_cmn_send_smc_msg_with_param(smu, 1865 SMU_MSG_SetWorkloadMask, 1866 1 << workload_type, 1867 NULL); 1868 } 1869 1870 static int smu_v14_0_2_baco_enter(struct smu_context *smu) 1871 { 1872 struct smu_baco_context *smu_baco = &smu->smu_baco; 1873 struct amdgpu_device *adev = smu->adev; 1874 1875 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) 1876 return smu_v14_0_baco_set_armd3_sequence(smu, 1877 smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO); 1878 else 1879 return smu_v14_0_baco_enter(smu); 1880 } 1881 1882 static int smu_v14_0_2_baco_exit(struct smu_context *smu) 1883 { 1884 struct amdgpu_device *adev = smu->adev; 1885 1886 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { 1887 /* Wait for PMFW handling for the Dstate change */ 1888 usleep_range(10000, 11000); 1889 return smu_v14_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); 1890 } else { 1891 return smu_v14_0_baco_exit(smu); 1892 } 1893 } 1894 1895 static bool smu_v14_0_2_is_mode1_reset_supported(struct smu_context *smu) 1896 { 1897 // TODO 1898 1899 return true; 1900 } 1901 1902 static int smu_v14_0_2_i2c_xfer(struct i2c_adapter *i2c_adap, 1903 struct i2c_msg *msg, int num_msgs) 1904 { 1905 struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap); 1906 struct amdgpu_device *adev = smu_i2c->adev; 1907 struct smu_context *smu = adev->powerplay.pp_handle; 1908 struct smu_table_context *smu_table = &smu->smu_table; 1909 struct smu_table 
		*table = &smu_table->driver_table;
	SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
	int i, j, r, c;
	u16 dir;

	if (!adev->pm.dpm_enabled)
		return -EBUSY;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->I2CcontrollerPort = smu_i2c->port;
	req->I2CSpeed = I2C_SPEED_FAST_400K;
	req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
	dir = msg[0].flags & I2C_M_RD;

	/* flatten all messages into one SwI2cCmd_t stream */
	for (c = i = 0; i < num_msgs; i++) {
		for (j = 0; j < msg[i].len; j++, c++) {
			SwI2cCmd_t *cmd = &req->SwI2cCmds[c];

			if (!(msg[i].flags & I2C_M_RD)) {
				/* write */
				cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
				cmd->ReadWriteData = msg[i].buf[j];
			}

			if ((dir ^ msg[i].flags) & I2C_M_RD) {
				/* The direction changes.
				 */
				dir = msg[i].flags & I2C_M_RD;
				cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
			}

			req->NumCmds++;

			/*
			 * Insert STOP if we are at the last byte of either last
			 * message for the transaction or the client explicitly
			 * requires a STOP at this particular message.
			 */
			if ((j == msg[i].len - 1) &&
			    ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
				cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
				cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
			}
		}
	}
	mutex_lock(&adev->pm.mutex);
	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
	mutex_unlock(&adev->pm.mutex);
	if (r)
		goto fail;

	/* copy read data back out of the response in the driver table */
	for (c = i = 0; i < num_msgs; i++) {
		if (!(msg[i].flags & I2C_M_RD)) {
			c += msg[i].len;
			continue;
		}
		for (j = 0; j < msg[i].len; j++, c++) {
			SwI2cCmd_t *cmd = &res->SwI2cCmds[c];

			msg[i].buf[j] = cmd->ReadWriteData;
		}
	}
	r = num_msgs;
fail:
	kfree(req);
	return r;
}

static u32 smu_v14_0_2_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm smu_v14_0_2_i2c_algo = {
	.master_xfer = smu_v14_0_2_i2c_xfer,
	.functionality = smu_v14_0_2_i2c_func,
};

static const struct i2c_adapter_quirks smu_v14_0_2_i2c_control_quirks = {
	.flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
	.max_read_len  = MAX_SW_I2C_COMMANDS,
	.max_write_len = MAX_SW_I2C_COMMANDS,
	.max_comb_1st_msg_len = 2,
	.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
};

/*
 * smu_v14_0_2_i2c_control_init - register one i2c adapter per SMU
 * software-I2C bus and wire up the FRU/RAS EEPROM bus pointers.
 */
static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int res, i;

	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		smu_i2c->adev = adev;
		smu_i2c->port = i;
		mutex_init(&smu_i2c->mutex);
		control->owner = THIS_MODULE;
		control->dev.parent = &adev->pdev->dev;
		control->algo = &smu_v14_0_2_i2c_algo;
		snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
		control->quirks = &smu_v14_0_2_i2c_control_quirks;
		i2c_set_adapdata(control, smu_i2c);

		res = i2c_add_adapter(control);
		if (res) {
			DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
			goto Out_err;
		}
	}

	/* assign the buses used for the FRU EEPROM and RAS EEPROM */
	/* XXX ideally this would be something in a vbios data table */
	adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
	adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;

	return 0;
Out_err:
	/* NOTE(review): this also deletes the adapter at index i, whose
	 * i2c_add_adapter() just failed; the i2c core tolerates deleting
	 * an unregistered adapter (with a warning), but starting at
	 * i - 1 would be cleaner — confirm before changing.
	 */
	for ( ; i >= 0; i--) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		i2c_del_adapter(control);
	}
	return res;
}

/* Unregister all SMU i2c adapters and clear the EEPROM bus pointers. */
static void smu_v14_0_2_i2c_control_fini(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int i;

	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		i2c_del_adapter(control);
	}
	adev->pm.ras_eeprom_i2c_bus = NULL;
	adev->pm.fru_eeprom_i2c_bus = NULL;
}

/*
 * smu_v14_0_2_set_mp1_state - forward only the UNLOAD state to the
 * PMFW; every other MP1 state request is silently accepted.
 */
static int smu_v14_0_2_set_mp1_state(struct smu_context *smu,
				     enum pp_mp1_state mp1_state)
{
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_UNLOAD:
		ret = smu_cmn_set_mp1_state(smu, mp1_state);
		break;
	default:
		/* Ignore others */
		ret = 0;
	}

	return ret;
}

/* Pass the requested DF (data fabric) C-state straight to the PMFW. */
static int smu_v14_0_2_set_df_cstate(struct smu_context *smu,
				     enum pp_df_cstate state)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_DFCstateControl,
					       state,
					       NULL);
}

/* Trigger a mode-1 reset through the DebugSMC message interface. */
static int smu_v14_0_2_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset);
	if (!ret) {
		/* emulation needs far longer for the reset to settle */
		if (amdgpu_emu_mode == 1)
			msleep(50000);
		else
msleep(1000); 2091 } 2092 2093 return ret; 2094 } 2095 2096 static int smu_v14_0_2_mode2_reset(struct smu_context *smu) 2097 { 2098 int ret = 0; 2099 2100 // TODO 2101 2102 return ret; 2103 } 2104 2105 static int smu_v14_0_2_enable_gfx_features(struct smu_context *smu) 2106 { 2107 struct amdgpu_device *adev = smu->adev; 2108 2109 if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(14, 0, 2)) 2110 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures, 2111 FEATURE_PWR_GFX, NULL); 2112 else 2113 return -EOPNOTSUPP; 2114 } 2115 2116 static void smu_v14_0_2_set_smu_mailbox_registers(struct smu_context *smu) 2117 { 2118 struct amdgpu_device *adev = smu->adev; 2119 2120 smu->param_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_82); 2121 smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_66); 2122 smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_90); 2123 2124 smu->debug_param_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_53); 2125 smu->debug_msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_75); 2126 smu->debug_resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_54); 2127 } 2128 2129 static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu, 2130 void **table) 2131 { 2132 struct smu_table_context *smu_table = &smu->smu_table; 2133 struct gpu_metrics_v1_3 *gpu_metrics = 2134 (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table; 2135 SmuMetricsExternal_t metrics_ext; 2136 SmuMetrics_t *metrics = &metrics_ext.SmuMetrics; 2137 int ret = 0; 2138 2139 ret = smu_cmn_get_metrics_table(smu, 2140 &metrics_ext, 2141 true); 2142 if (ret) 2143 return ret; 2144 2145 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3); 2146 2147 gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE]; 2148 gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT]; 2149 gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM]; 2150 gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX]; 2151 
gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC]; 2152 gpu_metrics->temperature_vrmem = max(metrics->AvgTemperature[TEMP_VR_MEM0], 2153 metrics->AvgTemperature[TEMP_VR_MEM1]); 2154 2155 gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity; 2156 gpu_metrics->average_umc_activity = metrics->AverageUclkActivity; 2157 gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage, 2158 metrics->Vcn1ActivityPercentage); 2159 2160 gpu_metrics->average_socket_power = metrics->AverageSocketPower; 2161 gpu_metrics->energy_accumulator = metrics->EnergyAccumulator; 2162 2163 if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD) 2164 gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs; 2165 else 2166 gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs; 2167 2168 if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD) 2169 gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs; 2170 else 2171 gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs; 2172 2173 gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency; 2174 gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency; 2175 gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency; 2176 gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency; 2177 2178 gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency; 2179 gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK]; 2180 gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK]; 2181 gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0]; 2182 gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0]; 2183 gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_0]; 2184 gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_0]; 2185 2186 gpu_metrics->throttle_status = 2187 smu_v14_0_2_get_throttler_status(metrics); 2188 
gpu_metrics->indep_throttle_status = 2189 smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status, 2190 smu_v14_0_2_throttler_map); 2191 2192 gpu_metrics->current_fan_speed = metrics->AvgFanRpm; 2193 2194 gpu_metrics->pcie_link_width = metrics->PcieWidth; 2195 if ((metrics->PcieRate - 1) > LINK_SPEED_MAX) 2196 gpu_metrics->pcie_link_speed = pcie_gen_to_speed(1); 2197 else 2198 gpu_metrics->pcie_link_speed = pcie_gen_to_speed(metrics->PcieRate); 2199 2200 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 2201 2202 gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_VDD_GFX]; 2203 gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_VDD_SOC]; 2204 gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VDDIO_MEM]; 2205 2206 *table = (void *)gpu_metrics; 2207 2208 return sizeof(struct gpu_metrics_v1_3); 2209 } 2210 2211 static void smu_v14_0_2_dump_od_table(struct smu_context *smu, 2212 OverDriveTableExternal_t *od_table) 2213 { 2214 struct amdgpu_device *adev = smu->adev; 2215 2216 dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin, 2217 od_table->OverDriveTable.GfxclkFmax); 2218 dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin, 2219 od_table->OverDriveTable.UclkFmax); 2220 } 2221 2222 static int smu_v14_0_2_upload_overdrive_table(struct smu_context *smu, 2223 OverDriveTableExternal_t *od_table) 2224 { 2225 int ret; 2226 ret = smu_cmn_update_table(smu, 2227 SMU_TABLE_OVERDRIVE, 2228 0, 2229 (void *)od_table, 2230 true); 2231 if (ret) 2232 dev_err(smu->adev->dev, "Failed to upload overdrive table!\n"); 2233 2234 return ret; 2235 } 2236 2237 static void smu_v14_0_2_set_supported_od_feature_mask(struct smu_context *smu) 2238 { 2239 struct amdgpu_device *adev = smu->adev; 2240 2241 if (smu_v14_0_2_is_od_feature_supported(smu, 2242 PP_OD_FEATURE_FAN_CURVE_BIT)) 2243 adev->pm.od_feature_mask |= OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE | 2244 OD_OPS_SUPPORT_FAN_CURVE_SET | 2245 
OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE | 2246 OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET | 2247 OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE | 2248 OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET | 2249 OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE | 2250 OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET | 2251 OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE | 2252 OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET; 2253 } 2254 2255 static int smu_v14_0_2_get_overdrive_table(struct smu_context *smu, 2256 OverDriveTableExternal_t *od_table) 2257 { 2258 int ret; 2259 ret = smu_cmn_update_table(smu, 2260 SMU_TABLE_OVERDRIVE, 2261 0, 2262 (void *)od_table, 2263 false); 2264 if (ret) 2265 dev_err(smu->adev->dev, "Failed to get overdrive table!\n"); 2266 2267 return ret; 2268 } 2269 2270 static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu) 2271 { 2272 OverDriveTableExternal_t *od_table = 2273 (OverDriveTableExternal_t *)smu->smu_table.overdrive_table; 2274 OverDriveTableExternal_t *boot_od_table = 2275 (OverDriveTableExternal_t *)smu->smu_table.boot_overdrive_table; 2276 OverDriveTableExternal_t *user_od_table = 2277 (OverDriveTableExternal_t *)smu->smu_table.user_overdrive_table; 2278 OverDriveTableExternal_t user_od_table_bak; 2279 int ret; 2280 int i; 2281 2282 ret = smu_v14_0_2_get_overdrive_table(smu, boot_od_table); 2283 if (ret) 2284 return ret; 2285 2286 smu_v14_0_2_dump_od_table(smu, boot_od_table); 2287 2288 memcpy(od_table, 2289 boot_od_table, 2290 sizeof(OverDriveTableExternal_t)); 2291 2292 /* 2293 * For S3/S4/Runpm resume, we need to setup those overdrive tables again, 2294 * but we have to preserve user defined values in "user_od_table". 
2295 */ 2296 if (!smu->adev->in_suspend) { 2297 memcpy(user_od_table, 2298 boot_od_table, 2299 sizeof(OverDriveTableExternal_t)); 2300 smu->user_dpm_profile.user_od = false; 2301 } else if (smu->user_dpm_profile.user_od) { 2302 memcpy(&user_od_table_bak, 2303 user_od_table, 2304 sizeof(OverDriveTableExternal_t)); 2305 memcpy(user_od_table, 2306 boot_od_table, 2307 sizeof(OverDriveTableExternal_t)); 2308 user_od_table->OverDriveTable.GfxclkFmin = 2309 user_od_table_bak.OverDriveTable.GfxclkFmin; 2310 user_od_table->OverDriveTable.GfxclkFmax = 2311 user_od_table_bak.OverDriveTable.GfxclkFmax; 2312 user_od_table->OverDriveTable.UclkFmin = 2313 user_od_table_bak.OverDriveTable.UclkFmin; 2314 user_od_table->OverDriveTable.UclkFmax = 2315 user_od_table_bak.OverDriveTable.UclkFmax; 2316 for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++) 2317 user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = 2318 user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i]; 2319 for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++) { 2320 user_od_table->OverDriveTable.FanLinearTempPoints[i] = 2321 user_od_table_bak.OverDriveTable.FanLinearTempPoints[i]; 2322 user_od_table->OverDriveTable.FanLinearPwmPoints[i] = 2323 user_od_table_bak.OverDriveTable.FanLinearPwmPoints[i]; 2324 } 2325 user_od_table->OverDriveTable.AcousticLimitRpmThreshold = 2326 user_od_table_bak.OverDriveTable.AcousticLimitRpmThreshold; 2327 user_od_table->OverDriveTable.AcousticTargetRpmThreshold = 2328 user_od_table_bak.OverDriveTable.AcousticTargetRpmThreshold; 2329 user_od_table->OverDriveTable.FanTargetTemperature = 2330 user_od_table_bak.OverDriveTable.FanTargetTemperature; 2331 user_od_table->OverDriveTable.FanMinimumPwm = 2332 user_od_table_bak.OverDriveTable.FanMinimumPwm; 2333 } 2334 2335 smu_v14_0_2_set_supported_od_feature_mask(smu); 2336 2337 return 0; 2338 } 2339 2340 static int smu_v14_0_2_restore_user_od_settings(struct smu_context *smu) 2341 { 2342 struct smu_table_context *table_context = 
&smu->smu_table; 2343 OverDriveTableExternal_t *od_table = table_context->overdrive_table; 2344 OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table; 2345 int res; 2346 2347 user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) | 2348 BIT(PP_OD_FEATURE_UCLK_BIT) | 2349 BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) | 2350 BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2351 res = smu_v14_0_2_upload_overdrive_table(smu, user_od_table); 2352 user_od_table->OverDriveTable.FeatureCtrlMask = 0; 2353 if (res == 0) 2354 memcpy(od_table, user_od_table, sizeof(OverDriveTableExternal_t)); 2355 2356 return res; 2357 } 2358 2359 static int smu_v14_0_2_od_restore_table_single(struct smu_context *smu, long input) 2360 { 2361 struct smu_table_context *table_context = &smu->smu_table; 2362 OverDriveTableExternal_t *boot_overdrive_table = 2363 (OverDriveTableExternal_t *)table_context->boot_overdrive_table; 2364 OverDriveTableExternal_t *od_table = 2365 (OverDriveTableExternal_t *)table_context->overdrive_table; 2366 struct amdgpu_device *adev = smu->adev; 2367 int i; 2368 2369 switch (input) { 2370 case PP_OD_EDIT_FAN_CURVE: 2371 for (i = 0; i < NUM_OD_FAN_MAX_POINTS; i++) { 2372 od_table->OverDriveTable.FanLinearTempPoints[i] = 2373 boot_overdrive_table->OverDriveTable.FanLinearTempPoints[i]; 2374 od_table->OverDriveTable.FanLinearPwmPoints[i] = 2375 boot_overdrive_table->OverDriveTable.FanLinearPwmPoints[i]; 2376 } 2377 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; 2378 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2379 break; 2380 case PP_OD_EDIT_ACOUSTIC_LIMIT: 2381 od_table->OverDriveTable.AcousticLimitRpmThreshold = 2382 boot_overdrive_table->OverDriveTable.AcousticLimitRpmThreshold; 2383 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; 2384 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2385 break; 2386 case PP_OD_EDIT_ACOUSTIC_TARGET: 2387 
od_table->OverDriveTable.AcousticTargetRpmThreshold = 2388 boot_overdrive_table->OverDriveTable.AcousticTargetRpmThreshold; 2389 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; 2390 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2391 break; 2392 case PP_OD_EDIT_FAN_TARGET_TEMPERATURE: 2393 od_table->OverDriveTable.FanTargetTemperature = 2394 boot_overdrive_table->OverDriveTable.FanTargetTemperature; 2395 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; 2396 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2397 break; 2398 case PP_OD_EDIT_FAN_MINIMUM_PWM: 2399 od_table->OverDriveTable.FanMinimumPwm = 2400 boot_overdrive_table->OverDriveTable.FanMinimumPwm; 2401 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; 2402 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2403 break; 2404 default: 2405 dev_info(adev->dev, "Invalid table index: %ld\n", input); 2406 return -EINVAL; 2407 } 2408 2409 return 0; 2410 } 2411 2412 static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu, 2413 enum PP_OD_DPM_TABLE_COMMAND type, 2414 long input[], 2415 uint32_t size) 2416 { 2417 struct smu_table_context *table_context = &smu->smu_table; 2418 OverDriveTableExternal_t *od_table = 2419 (OverDriveTableExternal_t *)table_context->overdrive_table; 2420 struct amdgpu_device *adev = smu->adev; 2421 uint32_t offset_of_voltageoffset; 2422 int32_t minimum, maximum; 2423 uint32_t feature_ctrlmask; 2424 int i, ret = 0; 2425 2426 switch (type) { 2427 case PP_OD_EDIT_SCLK_VDDC_TABLE: 2428 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) { 2429 dev_warn(adev->dev, "GFXCLK_LIMITS setting not supported!\n"); 2430 return -ENOTSUPP; 2431 } 2432 2433 for (i = 0; i < size; i += 2) { 2434 if (i + 2 > size) { 2435 dev_info(adev->dev, "invalid number of input parameters %d\n", size); 2436 return -EINVAL; 2437 } 2438 2439 switch (input[i]) { 2440 case 0: 2441 
smu_v14_0_2_get_od_setting_limits(smu, 2442 PP_OD_FEATURE_GFXCLK_FMIN, 2443 &minimum, 2444 &maximum); 2445 if (input[i + 1] < minimum || 2446 input[i + 1] > maximum) { 2447 dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n", 2448 input[i + 1], minimum, maximum); 2449 return -EINVAL; 2450 } 2451 2452 od_table->OverDriveTable.GfxclkFmin = input[i + 1]; 2453 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; 2454 break; 2455 2456 case 1: 2457 smu_v14_0_2_get_od_setting_limits(smu, 2458 PP_OD_FEATURE_GFXCLK_FMAX, 2459 &minimum, 2460 &maximum); 2461 if (input[i + 1] < minimum || 2462 input[i + 1] > maximum) { 2463 dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, %u]!\n", 2464 input[i + 1], minimum, maximum); 2465 return -EINVAL; 2466 } 2467 2468 od_table->OverDriveTable.GfxclkFmax = input[i + 1]; 2469 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; 2470 break; 2471 2472 default: 2473 dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]); 2474 dev_info(adev->dev, "Supported indices: [0:min,1:max]\n"); 2475 return -EINVAL; 2476 } 2477 } 2478 2479 if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) { 2480 dev_err(adev->dev, 2481 "Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n", 2482 (uint32_t)od_table->OverDriveTable.GfxclkFmin, 2483 (uint32_t)od_table->OverDriveTable.GfxclkFmax); 2484 return -EINVAL; 2485 } 2486 break; 2487 2488 case PP_OD_EDIT_MCLK_VDDC_TABLE: 2489 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) { 2490 dev_warn(adev->dev, "UCLK_LIMITS setting not supported!\n"); 2491 return -ENOTSUPP; 2492 } 2493 2494 for (i = 0; i < size; i += 2) { 2495 if (i + 2 > size) { 2496 dev_info(adev->dev, "invalid number of input parameters %d\n", size); 2497 return -EINVAL; 2498 } 2499 2500 switch (input[i]) { 2501 case 0: 2502 smu_v14_0_2_get_od_setting_limits(smu, 2503 PP_OD_FEATURE_UCLK_FMIN, 2504 &minimum, 2505 
&maximum); 2506 if (input[i + 1] < minimum || 2507 input[i + 1] > maximum) { 2508 dev_info(adev->dev, "UclkFmin (%ld) must be within [%u, %u]!\n", 2509 input[i + 1], minimum, maximum); 2510 return -EINVAL; 2511 } 2512 2513 od_table->OverDriveTable.UclkFmin = input[i + 1]; 2514 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT; 2515 break; 2516 2517 case 1: 2518 smu_v14_0_2_get_od_setting_limits(smu, 2519 PP_OD_FEATURE_UCLK_FMAX, 2520 &minimum, 2521 &maximum); 2522 if (input[i + 1] < minimum || 2523 input[i + 1] > maximum) { 2524 dev_info(adev->dev, "UclkFmax (%ld) must be within [%u, %u]!\n", 2525 input[i + 1], minimum, maximum); 2526 return -EINVAL; 2527 } 2528 2529 od_table->OverDriveTable.UclkFmax = input[i + 1]; 2530 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT; 2531 break; 2532 2533 default: 2534 dev_info(adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]); 2535 dev_info(adev->dev, "Supported indices: [0:min,1:max]\n"); 2536 return -EINVAL; 2537 } 2538 } 2539 2540 if (od_table->OverDriveTable.UclkFmin > od_table->OverDriveTable.UclkFmax) { 2541 dev_err(adev->dev, 2542 "Invalid setting: UclkFmin(%u) is bigger than UclkFmax(%u)\n", 2543 (uint32_t)od_table->OverDriveTable.UclkFmin, 2544 (uint32_t)od_table->OverDriveTable.UclkFmax); 2545 return -EINVAL; 2546 } 2547 break; 2548 2549 case PP_OD_EDIT_VDDGFX_OFFSET: 2550 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) { 2551 dev_warn(adev->dev, "Gfx offset setting not supported!\n"); 2552 return -ENOTSUPP; 2553 } 2554 2555 smu_v14_0_2_get_od_setting_limits(smu, 2556 PP_OD_FEATURE_GFX_VF_CURVE, 2557 &minimum, 2558 &maximum); 2559 if (input[0] < minimum || 2560 input[0] > maximum) { 2561 dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n", 2562 input[0], minimum, maximum); 2563 return -EINVAL; 2564 } 2565 2566 for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++) 2567 
od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = input[0]; 2568 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT); 2569 break; 2570 2571 case PP_OD_EDIT_FAN_CURVE: 2572 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) { 2573 dev_warn(adev->dev, "Fan curve setting not supported!\n"); 2574 return -ENOTSUPP; 2575 } 2576 2577 if (input[0] >= NUM_OD_FAN_MAX_POINTS - 1 || 2578 input[0] < 0) 2579 return -EINVAL; 2580 2581 smu_v14_0_2_get_od_setting_limits(smu, 2582 PP_OD_FEATURE_FAN_CURVE_TEMP, 2583 &minimum, 2584 &maximum); 2585 if (input[1] < minimum || 2586 input[1] > maximum) { 2587 dev_info(adev->dev, "Fan curve temp setting(%ld) must be within [%d, %d]!\n", 2588 input[1], minimum, maximum); 2589 return -EINVAL; 2590 } 2591 2592 smu_v14_0_2_get_od_setting_limits(smu, 2593 PP_OD_FEATURE_FAN_CURVE_PWM, 2594 &minimum, 2595 &maximum); 2596 if (input[2] < minimum || 2597 input[2] > maximum) { 2598 dev_info(adev->dev, "Fan curve pwm setting(%ld) must be within [%d, %d]!\n", 2599 input[2], minimum, maximum); 2600 return -EINVAL; 2601 } 2602 2603 od_table->OverDriveTable.FanLinearTempPoints[input[0]] = input[1]; 2604 od_table->OverDriveTable.FanLinearPwmPoints[input[0]] = input[2]; 2605 od_table->OverDriveTable.FanMode = FAN_MODE_MANUAL_LINEAR; 2606 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2607 break; 2608 2609 case PP_OD_EDIT_ACOUSTIC_LIMIT: 2610 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) { 2611 dev_warn(adev->dev, "Fan curve setting not supported!\n"); 2612 return -ENOTSUPP; 2613 } 2614 2615 smu_v14_0_2_get_od_setting_limits(smu, 2616 PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT, 2617 &minimum, 2618 &maximum); 2619 if (input[0] < minimum || 2620 input[0] > maximum) { 2621 dev_info(adev->dev, "acoustic limit threshold setting(%ld) must be within [%d, %d]!\n", 2622 input[0], minimum, maximum); 2623 return -EINVAL; 2624 } 2625 2626 
od_table->OverDriveTable.AcousticLimitRpmThreshold = input[0]; 2627 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; 2628 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2629 break; 2630 2631 case PP_OD_EDIT_ACOUSTIC_TARGET: 2632 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) { 2633 dev_warn(adev->dev, "Fan curve setting not supported!\n"); 2634 return -ENOTSUPP; 2635 } 2636 2637 smu_v14_0_2_get_od_setting_limits(smu, 2638 PP_OD_FEATURE_FAN_ACOUSTIC_TARGET, 2639 &minimum, 2640 &maximum); 2641 if (input[0] < minimum || 2642 input[0] > maximum) { 2643 dev_info(adev->dev, "acoustic target threshold setting(%ld) must be within [%d, %d]!\n", 2644 input[0], minimum, maximum); 2645 return -EINVAL; 2646 } 2647 2648 od_table->OverDriveTable.AcousticTargetRpmThreshold = input[0]; 2649 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; 2650 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2651 break; 2652 2653 case PP_OD_EDIT_FAN_TARGET_TEMPERATURE: 2654 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) { 2655 dev_warn(adev->dev, "Fan curve setting not supported!\n"); 2656 return -ENOTSUPP; 2657 } 2658 2659 smu_v14_0_2_get_od_setting_limits(smu, 2660 PP_OD_FEATURE_FAN_TARGET_TEMPERATURE, 2661 &minimum, 2662 &maximum); 2663 if (input[0] < minimum || 2664 input[0] > maximum) { 2665 dev_info(adev->dev, "fan target temperature setting(%ld) must be within [%d, %d]!\n", 2666 input[0], minimum, maximum); 2667 return -EINVAL; 2668 } 2669 2670 od_table->OverDriveTable.FanTargetTemperature = input[0]; 2671 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; 2672 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2673 break; 2674 2675 case PP_OD_EDIT_FAN_MINIMUM_PWM: 2676 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) { 2677 dev_warn(adev->dev, "Fan curve setting not supported!\n"); 2678 return -ENOTSUPP; 2679 } 2680 2681 
smu_v14_0_2_get_od_setting_limits(smu, 2682 PP_OD_FEATURE_FAN_MINIMUM_PWM, 2683 &minimum, 2684 &maximum); 2685 if (input[0] < minimum || 2686 input[0] > maximum) { 2687 dev_info(adev->dev, "fan minimum pwm setting(%ld) must be within [%d, %d]!\n", 2688 input[0], minimum, maximum); 2689 return -EINVAL; 2690 } 2691 2692 od_table->OverDriveTable.FanMinimumPwm = input[0]; 2693 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; 2694 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2695 break; 2696 2697 case PP_OD_RESTORE_DEFAULT_TABLE: 2698 if (size == 1) { 2699 ret = smu_v14_0_2_od_restore_table_single(smu, input[0]); 2700 if (ret) 2701 return ret; 2702 } else { 2703 feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask; 2704 memcpy(od_table, 2705 table_context->boot_overdrive_table, 2706 sizeof(OverDriveTableExternal_t)); 2707 od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask; 2708 } 2709 fallthrough; 2710 case PP_OD_COMMIT_DPM_TABLE: 2711 /* 2712 * The member below instructs PMFW the settings focused in 2713 * this single operation. 2714 * `uint32_t FeatureCtrlMask;` 2715 * It does not contain actual informations about user's custom 2716 * settings. Thus we do not cache it. 
2717 */ 2718 offset_of_voltageoffset = offsetof(OverDriveTable_t, VoltageOffsetPerZoneBoundary); 2719 if (memcmp((u8 *)od_table + offset_of_voltageoffset, 2720 table_context->user_overdrive_table + offset_of_voltageoffset, 2721 sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset)) { 2722 smu_v14_0_2_dump_od_table(smu, od_table); 2723 2724 ret = smu_v14_0_2_upload_overdrive_table(smu, od_table); 2725 if (ret) { 2726 dev_err(adev->dev, "Failed to upload overdrive table!\n"); 2727 return ret; 2728 } 2729 2730 od_table->OverDriveTable.FeatureCtrlMask = 0; 2731 memcpy(table_context->user_overdrive_table + offset_of_voltageoffset, 2732 (u8 *)od_table + offset_of_voltageoffset, 2733 sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset); 2734 2735 if (!memcmp(table_context->user_overdrive_table, 2736 table_context->boot_overdrive_table, 2737 sizeof(OverDriveTableExternal_t))) 2738 smu->user_dpm_profile.user_od = false; 2739 else 2740 smu->user_dpm_profile.user_od = true; 2741 } 2742 break; 2743 2744 default: 2745 return -ENOSYS; 2746 } 2747 2748 return ret; 2749 } 2750 2751 static int smu_v14_0_2_set_power_limit(struct smu_context *smu, 2752 enum smu_ppt_limit_type limit_type, 2753 uint32_t limit) 2754 { 2755 PPTable_t *pptable = smu->smu_table.driver_pptable; 2756 uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC]; 2757 struct smu_table_context *table_context = &smu->smu_table; 2758 OverDriveTableExternal_t *od_table = 2759 (OverDriveTableExternal_t *)table_context->overdrive_table; 2760 int ret = 0; 2761 2762 if (limit_type != SMU_DEFAULT_PPT_LIMIT) 2763 return -EINVAL; 2764 2765 if (limit <= msg_limit) { 2766 if (smu->current_power_limit > msg_limit) { 2767 od_table->OverDriveTable.Ppt = 0; 2768 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT; 2769 2770 ret = smu_v14_0_2_upload_overdrive_table(smu, od_table); 2771 if (ret) { 2772 dev_err(smu->adev->dev, "Failed to upload overdrive table!\n"); 
2773 return ret; 2774 } 2775 } 2776 return smu_v14_0_set_power_limit(smu, limit_type, limit); 2777 } else if (smu->od_enabled) { 2778 ret = smu_v14_0_set_power_limit(smu, limit_type, msg_limit); 2779 if (ret) 2780 return ret; 2781 2782 od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100; 2783 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT; 2784 2785 ret = smu_v14_0_2_upload_overdrive_table(smu, od_table); 2786 if (ret) { 2787 dev_err(smu->adev->dev, "Failed to upload overdrive table!\n"); 2788 return ret; 2789 } 2790 2791 smu->current_power_limit = limit; 2792 } else { 2793 return -EINVAL; 2794 } 2795 2796 return 0; 2797 } 2798 2799 static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { 2800 .get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask, 2801 .set_default_dpm_table = smu_v14_0_2_set_default_dpm_table, 2802 .i2c_init = smu_v14_0_2_i2c_control_init, 2803 .i2c_fini = smu_v14_0_2_i2c_control_fini, 2804 .is_dpm_running = smu_v14_0_2_is_dpm_running, 2805 .dump_pptable = smu_v14_0_2_dump_pptable, 2806 .init_microcode = smu_v14_0_init_microcode, 2807 .load_microcode = smu_v14_0_load_microcode, 2808 .fini_microcode = smu_v14_0_fini_microcode, 2809 .init_smc_tables = smu_v14_0_2_init_smc_tables, 2810 .fini_smc_tables = smu_v14_0_fini_smc_tables, 2811 .init_power = smu_v14_0_init_power, 2812 .fini_power = smu_v14_0_fini_power, 2813 .check_fw_status = smu_v14_0_check_fw_status, 2814 .setup_pptable = smu_v14_0_2_setup_pptable, 2815 .check_fw_version = smu_v14_0_check_fw_version, 2816 .write_pptable = smu_cmn_write_pptable, 2817 .set_driver_table_location = smu_v14_0_set_driver_table_location, 2818 .system_features_control = smu_v14_0_system_features_control, 2819 .set_allowed_mask = smu_v14_0_set_allowed_mask, 2820 .get_enabled_mask = smu_cmn_get_enabled_mask, 2821 .dpm_set_vcn_enable = smu_v14_0_set_vcn_enable, 2822 .dpm_set_jpeg_enable = smu_v14_0_set_jpeg_enable, 2823 .get_dpm_ultimate_freq = 
smu_v14_0_2_get_dpm_ultimate_freq, 2824 .get_vbios_bootup_values = smu_v14_0_get_vbios_bootup_values, 2825 .read_sensor = smu_v14_0_2_read_sensor, 2826 .feature_is_enabled = smu_cmn_feature_is_enabled, 2827 .print_clk_levels = smu_v14_0_2_print_clk_levels, 2828 .force_clk_levels = smu_v14_0_2_force_clk_levels, 2829 .update_pcie_parameters = smu_v14_0_2_update_pcie_parameters, 2830 .get_thermal_temperature_range = smu_v14_0_2_get_thermal_temperature_range, 2831 .register_irq_handler = smu_v14_0_register_irq_handler, 2832 .enable_thermal_alert = smu_v14_0_enable_thermal_alert, 2833 .disable_thermal_alert = smu_v14_0_disable_thermal_alert, 2834 .notify_memory_pool_location = smu_v14_0_notify_memory_pool_location, 2835 .get_gpu_metrics = smu_v14_0_2_get_gpu_metrics, 2836 .set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range, 2837 .set_default_od_settings = smu_v14_0_2_set_default_od_settings, 2838 .restore_user_od_settings = smu_v14_0_2_restore_user_od_settings, 2839 .od_edit_dpm_table = smu_v14_0_2_od_edit_dpm_table, 2840 .init_pptable_microcode = smu_v14_0_init_pptable_microcode, 2841 .populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk, 2842 .set_performance_level = smu_v14_0_set_performance_level, 2843 .gfx_off_control = smu_v14_0_gfx_off_control, 2844 .get_unique_id = smu_v14_0_2_get_unique_id, 2845 .get_power_limit = smu_v14_0_2_get_power_limit, 2846 .set_power_limit = smu_v14_0_2_set_power_limit, 2847 .set_power_source = smu_v14_0_set_power_source, 2848 .get_power_profile_mode = smu_v14_0_2_get_power_profile_mode, 2849 .set_power_profile_mode = smu_v14_0_2_set_power_profile_mode, 2850 .run_btc = smu_v14_0_run_btc, 2851 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 2852 .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, 2853 .set_tool_table_location = smu_v14_0_set_tool_table_location, 2854 .deep_sleep_control = smu_v14_0_deep_sleep_control, 2855 .gfx_ulv_control = smu_v14_0_gfx_ulv_control, 2856 .get_bamaco_support = 
smu_v14_0_get_bamaco_support, 2857 .baco_get_state = smu_v14_0_baco_get_state, 2858 .baco_set_state = smu_v14_0_baco_set_state, 2859 .baco_enter = smu_v14_0_2_baco_enter, 2860 .baco_exit = smu_v14_0_2_baco_exit, 2861 .mode1_reset_is_support = smu_v14_0_2_is_mode1_reset_supported, 2862 .mode1_reset = smu_v14_0_2_mode1_reset, 2863 .mode2_reset = smu_v14_0_2_mode2_reset, 2864 .enable_gfx_features = smu_v14_0_2_enable_gfx_features, 2865 .set_mp1_state = smu_v14_0_2_set_mp1_state, 2866 .set_df_cstate = smu_v14_0_2_set_df_cstate, 2867 #if 0 2868 .gpo_control = smu_v14_0_gpo_control, 2869 #endif 2870 }; 2871 2872 void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu) 2873 { 2874 smu->ppt_funcs = &smu_v14_0_2_ppt_funcs; 2875 smu->message_map = smu_v14_0_2_message_map; 2876 smu->clock_map = smu_v14_0_2_clk_map; 2877 smu->feature_map = smu_v14_0_2_feature_mask_map; 2878 smu->table_map = smu_v14_0_2_table_map; 2879 smu->pwr_src_map = smu_v14_0_2_pwr_src_map; 2880 smu->workload_map = smu_v14_0_2_workload_map; 2881 smu_v14_0_2_set_smu_mailbox_registers(smu); 2882 } 2883