1 /* 2 * Copyright 2020 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #define SWSMU_CODE_LAYER_L2 25 26 #include "amdgpu.h" 27 #include "amdgpu_smu.h" 28 #include "smu_v11_0.h" 29 #include "smu11_driver_if_vangogh.h" 30 #include "vangogh_ppt.h" 31 #include "smu_v11_5_ppsmc.h" 32 #include "smu_v11_5_pmfw.h" 33 #include "smu_cmn.h" 34 #include "soc15_common.h" 35 #include "asic_reg/gc/gc_10_3_0_offset.h" 36 #include "asic_reg/gc/gc_10_3_0_sh_mask.h" 37 #include <asm/processor.h> 38 39 /* 40 * DO NOT use these for err/warn/info/debug messages. 41 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 42 * They are more MGPU friendly. 43 */ 44 #undef pr_err 45 #undef pr_warn 46 #undef pr_info 47 #undef pr_debug 48 49 // Registers related to GFXOFF 50 // addressBlock: smuio_smuio_SmuSmuioDec 51 // base address: 0x5a000 52 #define mmSMUIO_GFX_MISC_CNTL 0x00c5 53 #define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0 54 55 //SMUIO_GFX_MISC_CNTL 56 #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff__SHIFT 0x0 57 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1 58 #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK 0x00000001L 59 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L 60 61 #define FEATURE_MASK(feature) (1ULL << feature) 62 #define SMC_DPM_FEATURE ( \ 63 FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ 64 FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ 65 FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ 66 FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \ 67 FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \ 68 FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \ 69 FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \ 70 FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \ 71 FEATURE_MASK(FEATURE_GFX_DPM_BIT)) 72 73 static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = { 74 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), 75 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0), 76 MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 0), 77 MSG_MAP(EnableGfxOff, PPSMC_MSG_EnableGfxOff, 0), 78 MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0), 79 MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0), 80 MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 0), 81 MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 0), 82 MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0), 83 MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0), 84 MSG_MAP(RlcPowerNotify, PPSMC_MSG_RlcPowerNotify, 0), 85 MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 0), 86 MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 0), 87 MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 0), 88 MSG_MAP(SetHardMinIspiclkByFreq, PPSMC_MSG_SetHardMinIspiclkByFreq, 0), 89 MSG_MAP(SetHardMinIspxclkByFreq, PPSMC_MSG_SetHardMinIspxclkByFreq, 0), 90 MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 0), 91 MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 0), 92 MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0), 93 MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), 94 MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 0), 95 MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 0), 96 MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 0), 97 MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 0), 98 MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 0), 99 MSG_MAP(EnablePostCode, PPSMC_MSG_EnablePostCode, 0), 100 MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency, 0), 101 MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency, 0), 102 MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0), 103 MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 0), 104 MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 0), 105 MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 0), 106 MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 0), 107 MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage, 0), 108 MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0), 109 MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0), 110 MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 0), 111 MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 0), 112 MSG_MAP(PowerUpCvip, PPSMC_MSG_PowerUpCvip, 0), 113 MSG_MAP(PowerDownCvip, PPSMC_MSG_PowerDownCvip, 0), 114 MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), 115 MSG_MAP(GetThermalLimit, PPSMC_MSG_GetThermalLimit, 0), 116 MSG_MAP(GetCurrentTemperature, PPSMC_MSG_GetCurrentTemperature, 0), 117 MSG_MAP(GetCurrentPower, PPSMC_MSG_GetCurrentPower, 0), 118 MSG_MAP(GetCurrentVoltage, PPSMC_MSG_GetCurrentVoltage, 0), 119 MSG_MAP(GetCurrentCurrent, PPSMC_MSG_GetCurrentCurrent, 0), 120 MSG_MAP(GetAverageCpuActivity, PPSMC_MSG_GetAverageCpuActivity, 0), 121 MSG_MAP(GetAverageGfxActivity, PPSMC_MSG_GetAverageGfxActivity, 0), 122 MSG_MAP(GetAveragePower, PPSMC_MSG_GetAveragePower, 0), 123 MSG_MAP(GetAverageTemperature, PPSMC_MSG_GetAverageTemperature, 0), 124 MSG_MAP(SetAveragePowerTimeConstant, PPSMC_MSG_SetAveragePowerTimeConstant, 0), 125 MSG_MAP(SetAverageActivityTimeConstant, PPSMC_MSG_SetAverageActivityTimeConstant, 0), 126 MSG_MAP(SetAverageTemperatureTimeConstant, PPSMC_MSG_SetAverageTemperatureTimeConstant, 0), 127 MSG_MAP(SetMitigationEndHysteresis, PPSMC_MSG_SetMitigationEndHysteresis, 0), 128 MSG_MAP(GetCurrentFreq, PPSMC_MSG_GetCurrentFreq, 0), 129 MSG_MAP(SetReducedPptLimit, PPSMC_MSG_SetReducedPptLimit, 0), 130 MSG_MAP(SetReducedThermalLimit, PPSMC_MSG_SetReducedThermalLimit, 0), 131 MSG_MAP(DramLogSetDramAddr, PPSMC_MSG_DramLogSetDramAddr, 0), 132 MSG_MAP(StartDramLogging, PPSMC_MSG_StartDramLogging, 0), 133 MSG_MAP(StopDramLogging, PPSMC_MSG_StopDramLogging, 0), 134 MSG_MAP(SetSoftMinCclk, PPSMC_MSG_SetSoftMinCclk, 0), 135 MSG_MAP(SetSoftMaxCclk, PPSMC_MSG_SetSoftMaxCclk, 0), 136 MSG_MAP(RequestActiveWgp, PPSMC_MSG_RequestActiveWgp, 0), 137 MSG_MAP(SetFastPPTLimit, PPSMC_MSG_SetFastPPTLimit, 0), 138 MSG_MAP(SetSlowPPTLimit, PPSMC_MSG_SetSlowPPTLimit, 0), 139 MSG_MAP(GetFastPPTLimit, PPSMC_MSG_GetFastPPTLimit, 0), 140 MSG_MAP(GetSlowPPTLimit, PPSMC_MSG_GetSlowPPTLimit, 0), 141 MSG_MAP(GetGfxOffStatus, PPSMC_MSG_GetGfxOffStatus, 0), 142 MSG_MAP(GetGfxOffEntryCount, PPSMC_MSG_GetGfxOffEntryCount, 0), 143 MSG_MAP(LogGfxOffResidency, PPSMC_MSG_LogGfxOffResidency, 0), 144 }; 145 146 static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = { 147 FEA_MAP(PPT), 148 FEA_MAP(TDC), 149 FEA_MAP(THERMAL), 150 FEA_MAP(DS_GFXCLK), 151 FEA_MAP(DS_SOCCLK), 152 FEA_MAP(DS_LCLK), 153 FEA_MAP(DS_FCLK), 154 FEA_MAP(DS_MP1CLK), 155 FEA_MAP(DS_MP0CLK), 156 FEA_MAP(ATHUB_PG), 157 FEA_MAP(CCLK_DPM), 158 FEA_MAP(FAN_CONTROLLER), 159 FEA_MAP(ULV), 160 FEA_MAP(VCN_DPM), 161 FEA_MAP(LCLK_DPM), 162 FEA_MAP(SHUBCLK_DPM), 163 FEA_MAP(DCFCLK_DPM), 164 FEA_MAP(DS_DCFCLK), 165 FEA_MAP(S0I2), 166 FEA_MAP(SMU_LOW_POWER), 167 FEA_MAP(GFX_DEM), 168 FEA_MAP(PSI), 169 FEA_MAP(PROCHOT), 170 FEA_MAP(CPUOFF), 171 FEA_MAP(STAPM), 172 FEA_MAP(S0I3), 173 FEA_MAP(DF_CSTATES), 174 FEA_MAP(PERF_LIMIT), 175 FEA_MAP(CORE_DLDO), 176 FEA_MAP(RSMU_LOW_POWER), 177 FEA_MAP(SMN_LOW_POWER), 178 FEA_MAP(THM_LOW_POWER), 179 FEA_MAP(SMUIO_LOW_POWER), 180 FEA_MAP(MP1_LOW_POWER), 181 FEA_MAP(DS_VCN), 182 FEA_MAP(CPPC), 183 FEA_MAP(OS_CSTATES), 184 FEA_MAP(ISP_DPM), 185 FEA_MAP(A55_DPM), 186 FEA_MAP(CVIP_DSP_DPM), 187 FEA_MAP(MSMU_LOW_POWER), 188 FEA_MAP_REVERSE(SOCCLK), 189 FEA_MAP_REVERSE(FCLK), 190 FEA_MAP_HALF_REVERSE(GFX), 191 }; 192 193 static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = { 194 TAB_MAP_VALID(WATERMARKS), 195 TAB_MAP_VALID(SMU_METRICS), 196 TAB_MAP_VALID(CUSTOM_DPM), 197 TAB_MAP_VALID(DPMCLOCKS), 198 }; 199 200 static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { 201 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), 202 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), 203 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), 204 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), 205 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), 206 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CAPPED, WORKLOAD_PPLIB_CAPPED_BIT), 207 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_UNCAPPED, WORKLOAD_PPLIB_UNCAPPED_BIT), 208 }; 209 210 static const uint8_t vangogh_throttler_map[] = { 211 [THROTTLER_STATUS_BIT_SPL] = (SMU_THROTTLER_SPL_BIT), 212 [THROTTLER_STATUS_BIT_FPPT] = (SMU_THROTTLER_FPPT_BIT), 213 [THROTTLER_STATUS_BIT_SPPT] = (SMU_THROTTLER_SPPT_BIT), 214 [THROTTLER_STATUS_BIT_SPPT_APU] = (SMU_THROTTLER_SPPT_APU_BIT), 215 [THROTTLER_STATUS_BIT_THM_CORE] = (SMU_THROTTLER_TEMP_CORE_BIT), 216 [THROTTLER_STATUS_BIT_THM_GFX] = (SMU_THROTTLER_TEMP_GPU_BIT), 217 [THROTTLER_STATUS_BIT_THM_SOC] = (SMU_THROTTLER_TEMP_SOC_BIT), 218 [THROTTLER_STATUS_BIT_TDC_VDD] = (SMU_THROTTLER_TDC_VDD_BIT), 219 [THROTTLER_STATUS_BIT_TDC_SOC] = (SMU_THROTTLER_TDC_SOC_BIT), 220 [THROTTLER_STATUS_BIT_TDC_GFX] = (SMU_THROTTLER_TDC_GFX_BIT), 221 [THROTTLER_STATUS_BIT_TDC_CVIP] = (SMU_THROTTLER_TDC_CVIP_BIT), 222 }; 223 224 static int vangogh_tables_init(struct smu_context *smu) 225 { 226 struct smu_table_context *smu_table = &smu->smu_table; 227 struct smu_table *tables = smu_table->tables; 228 229 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), 230 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 231 SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t), 232 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 233 SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, 234 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 235 SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t), 236 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 237 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, max(sizeof(SmuMetrics_t), sizeof(SmuMetrics_legacy_t)), 238 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 239 240 smu_table->metrics_table = kzalloc(max(sizeof(SmuMetrics_t), sizeof(SmuMetrics_legacy_t)), GFP_KERNEL); 241 if (!smu_table->metrics_table) 242 goto err0_out; 243 smu_table->metrics_time = 0; 244 245 smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2); 246 smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_3)); 247 smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_4)); 248 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 249 if (!smu_table->gpu_metrics_table) 250 goto err1_out; 251 252 smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); 253 if (!smu_table->watermarks_table) 254 goto err2_out; 255 256 smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL); 257 if (!smu_table->clocks_table) 258 goto err3_out; 259 260 return 0; 261 262 err3_out: 263 kfree(smu_table->watermarks_table); 264 err2_out: 265 kfree(smu_table->gpu_metrics_table); 266 err1_out: 267 kfree(smu_table->metrics_table); 268 err0_out: 269 return -ENOMEM; 270 } 271 272 static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu, 273 MetricsMember_t member, 274 uint32_t *value) 275 { 276 struct smu_table_context *smu_table = &smu->smu_table; 277 SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table; 278 int ret = 0; 279 280 ret = smu_cmn_get_metrics_table(smu, 281 NULL, 282 false); 283 if (ret) 284 return ret; 285 286 switch (member) { 287 case METRICS_CURR_GFXCLK: 288 *value = metrics->GfxclkFrequency; 289 break; 290 case METRICS_AVERAGE_SOCCLK: 291 *value = metrics->SocclkFrequency; 292 break; 293 case METRICS_AVERAGE_VCLK: 294 *value = metrics->VclkFrequency; 295 break; 296 case METRICS_AVERAGE_DCLK: 297 *value = metrics->DclkFrequency; 298 break; 299 case METRICS_CURR_UCLK: 300 *value = metrics->MemclkFrequency; 301 break; 302 case METRICS_AVERAGE_GFXACTIVITY: 303 *value = metrics->GfxActivity / 100; 304 break; 305 case METRICS_AVERAGE_VCNACTIVITY: 306 *value = metrics->UvdActivity / 100; 307 break; 308 case METRICS_AVERAGE_SOCKETPOWER: 309 *value = (metrics->CurrentSocketPower << 8) / 310 1000 ; 311 break; 312 case METRICS_TEMPERATURE_EDGE: 313 *value = metrics->GfxTemperature / 100 * 314 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 315 break; 316 case METRICS_TEMPERATURE_HOTSPOT: 317 *value = metrics->SocTemperature / 100 * 318 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 319 break; 320 case METRICS_THROTTLER_STATUS: 321 *value = metrics->ThrottlerStatus; 322 break; 323 case METRICS_VOLTAGE_VDDGFX: 324 *value = metrics->Voltage[2]; 325 break; 326 case METRICS_VOLTAGE_VDDSOC: 327 *value = metrics->Voltage[1]; 328 break; 329 case METRICS_AVERAGE_CPUCLK: 330 memcpy(value, &metrics->CoreFrequency[0], 331 smu->cpu_core_num * sizeof(uint16_t)); 332 break; 333 default: 334 *value = UINT_MAX; 335 break; 336 } 337 338 return ret; 339 } 340 341 static int vangogh_get_smu_metrics_data(struct smu_context *smu, 342 MetricsMember_t member, 343 uint32_t *value) 344 { 345 struct smu_table_context *smu_table = &smu->smu_table; 346 SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; 347 int ret = 0; 348 349 ret = smu_cmn_get_metrics_table(smu, 350 NULL, 351 false); 352 if (ret) 353 return ret; 354 355 switch (member) { 356 case METRICS_CURR_GFXCLK: 357 *value = metrics->Current.GfxclkFrequency; 358 break; 359 case METRICS_AVERAGE_SOCCLK: 360 *value = metrics->Current.SocclkFrequency; 361 break; 362 case METRICS_AVERAGE_VCLK: 363 *value = metrics->Current.VclkFrequency; 364 break; 365 case METRICS_AVERAGE_DCLK: 366 *value = metrics->Current.DclkFrequency; 367 break; 368 case METRICS_CURR_UCLK: 369 *value = metrics->Current.MemclkFrequency; 370 break; 371 case METRICS_AVERAGE_GFXACTIVITY: 372 *value = metrics->Current.GfxActivity; 373 break; 374 case METRICS_AVERAGE_VCNACTIVITY: 375 *value = metrics->Current.UvdActivity; 376 break; 377 case METRICS_AVERAGE_SOCKETPOWER: 378 *value = (metrics->Average.CurrentSocketPower << 8) / 379 1000; 380 break; 381 case METRICS_CURR_SOCKETPOWER: 382 *value = (metrics->Current.CurrentSocketPower << 8) / 383 1000; 384 break; 385 case METRICS_TEMPERATURE_EDGE: 386 *value = metrics->Current.GfxTemperature / 100 * 387 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 388 break; 389 case METRICS_TEMPERATURE_HOTSPOT: 390 *value = metrics->Current.SocTemperature / 100 * 391 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 392 break; 393 case METRICS_THROTTLER_STATUS: 394 *value = metrics->Current.ThrottlerStatus; 395 break; 396 case METRICS_VOLTAGE_VDDGFX: 397 *value = metrics->Current.Voltage[2]; 398 break; 399 case METRICS_VOLTAGE_VDDSOC: 400 *value = metrics->Current.Voltage[1]; 401 break; 402 case METRICS_AVERAGE_CPUCLK: 403 memcpy(value, &metrics->Current.CoreFrequency[0], 404 smu->cpu_core_num * sizeof(uint16_t)); 405 break; 406 default: 407 *value = UINT_MAX; 408 break; 409 } 410 411 return ret; 412 } 413 414 static int vangogh_common_get_smu_metrics_data(struct smu_context *smu, 415 MetricsMember_t member, 416 uint32_t *value) 417 { 418 int ret = 0; 419 420 if (smu->smc_fw_if_version < 0x3) 421 ret = vangogh_get_legacy_smu_metrics_data(smu, member, value); 422 else 423 ret = vangogh_get_smu_metrics_data(smu, member, value); 424 425 return ret; 426 } 427 428 static int vangogh_allocate_dpm_context(struct smu_context *smu) 429 { 430 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 431 432 smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context), 433 GFP_KERNEL); 434 if (!smu_dpm->dpm_context) 435 return -ENOMEM; 436 437 smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context); 438 439 return 0; 440 } 441 442 static int vangogh_init_smc_tables(struct smu_context *smu) 443 { 444 int ret = 0; 445 446 ret = vangogh_tables_init(smu); 447 if (ret) 448 return ret; 449 450 ret = vangogh_allocate_dpm_context(smu); 451 if (ret) 452 return ret; 453 454 #ifdef CONFIG_X86 455 /* AMD x86 APU only */ 456 smu->cpu_core_num = topology_num_cores_per_package(); 457 #else 458 smu->cpu_core_num = 4; 459 #endif 460 461 return smu_v11_0_init_smc_tables(smu); 462 } 463 464 static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, 465 bool enable, 466 int inst) 467 { 468 int ret = 0; 469 470 if (enable) { 471 /* vcn dpm on is a prerequisite for vcn power gate messages */ 472 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL); 473 if (ret) 474 return ret; 475 } else { 476 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL); 477 if (ret) 478 return ret; 479 } 480 481 return ret; 482 } 483 484 static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) 485 { 486 int ret = 0; 487 488 if (enable) { 489 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL); 490 if (ret) 491 return ret; 492 } else { 493 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL); 494 if (ret) 495 return ret; 496 } 497 498 return ret; 499 } 500 501 static bool vangogh_is_dpm_running(struct smu_context *smu) 502 { 503 struct amdgpu_device *adev = smu->adev; 504 int ret = 0; 505 uint64_t feature_enabled; 506 507 /* we need to re-init after suspend so return false */ 508 if (adev->in_suspend) 509 return false; 510 511 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 512 513 if (ret) 514 return false; 515 516 return !!(feature_enabled & SMC_DPM_FEATURE); 517 } 518 519 static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type, 520 uint32_t dpm_level, uint32_t *freq) 521 { 522 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 523 524 if (!clk_table || clk_type >= SMU_CLK_COUNT) 525 return -EINVAL; 526 527 switch (clk_type) { 528 case SMU_SOCCLK: 529 if (dpm_level >= clk_table->NumSocClkLevelsEnabled) 530 return -EINVAL; 531 *freq = clk_table->SocClocks[dpm_level]; 532 break; 533 case SMU_VCLK: 534 if (dpm_level >= clk_table->VcnClkLevelsEnabled) 535 return -EINVAL; 536 *freq = clk_table->VcnClocks[dpm_level].vclk; 537 break; 538 case SMU_DCLK: 539 if (dpm_level >= clk_table->VcnClkLevelsEnabled) 540 return -EINVAL; 541 *freq = clk_table->VcnClocks[dpm_level].dclk; 542 break; 543 case SMU_UCLK: 544 case SMU_MCLK: 545 if (dpm_level >= clk_table->NumDfPstatesEnabled) 546 return -EINVAL; 547 *freq = clk_table->DfPstateTable[dpm_level].memclk; 548 549 break; 550 case SMU_FCLK: 551 if (dpm_level >= clk_table->NumDfPstatesEnabled) 552 return -EINVAL; 553 *freq = clk_table->DfPstateTable[dpm_level].fclk; 554 break; 555 default: 556 return -EINVAL; 557 } 558 559 return 0; 560 } 561 562 static int vangogh_print_legacy_clk_levels(struct smu_context *smu, 563 enum smu_clk_type clk_type, char *buf) 564 { 565 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 566 SmuMetrics_legacy_t metrics; 567 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 568 int i, idx, size = 0, ret = 0, start_offset = 0; 569 uint32_t cur_value = 0, value = 0, count = 0; 570 bool cur_value_match_level = false; 571 572 memset(&metrics, 0, sizeof(metrics)); 573 574 ret = smu_cmn_get_metrics_table(smu, &metrics, false); 575 if (ret) 576 return ret; 577 578 smu_cmn_get_sysfs_buf(&buf, &size); 579 start_offset = size; 580 581 switch (clk_type) { 582 case SMU_OD_SCLK: 583 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 584 size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); 585 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 586 (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); 587 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 588 (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); 589 } 590 break; 591 case SMU_OD_CCLK: 592 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 593 size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); 594 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 595 (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); 596 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 597 (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); 598 } 599 break; 600 case SMU_OD_RANGE: 601 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 602 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 603 size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", 604 smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); 605 size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", 606 smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); 607 } 608 break; 609 case SMU_SOCCLK: 610 /* the level 3 ~ 6 of socclk use the same frequency for vangogh */ 611 count = clk_table->NumSocClkLevelsEnabled; 612 cur_value = metrics.SocclkFrequency; 613 break; 614 case SMU_VCLK: 615 count = clk_table->VcnClkLevelsEnabled; 616 cur_value = metrics.VclkFrequency; 617 break; 618 case SMU_DCLK: 619 count = clk_table->VcnClkLevelsEnabled; 620 cur_value = metrics.DclkFrequency; 621 break; 622 case SMU_MCLK: 623 count = clk_table->NumDfPstatesEnabled; 624 cur_value = metrics.MemclkFrequency; 625 break; 626 case SMU_FCLK: 627 count = clk_table->NumDfPstatesEnabled; 628 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value); 629 if (ret) 630 return ret; 631 break; 632 default: 633 break; 634 } 635 636 switch (clk_type) { 637 case SMU_SOCCLK: 638 case SMU_VCLK: 639 case SMU_DCLK: 640 case SMU_MCLK: 641 case SMU_FCLK: 642 for (i = 0; i < count; i++) { 643 idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; 644 ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); 645 if (ret) 646 return ret; 647 if (!value) 648 continue; 649 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value, 650 cur_value == value ? "*" : ""); 651 if (cur_value == value) 652 cur_value_match_level = true; 653 } 654 655 if (!cur_value_match_level) 656 size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value); 657 break; 658 default: 659 break; 660 } 661 662 return size - start_offset; 663 } 664 665 static int vangogh_print_clk_levels(struct smu_context *smu, 666 enum smu_clk_type clk_type, char *buf) 667 { 668 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 669 SmuMetrics_t metrics; 670 int i, idx, size = 0, ret = 0, start_offset = 0; 671 uint32_t cur_value = 0, value = 0, count = 0; 672 bool cur_value_match_level = false; 673 uint32_t min, max; 674 675 memset(&metrics, 0, sizeof(metrics)); 676 677 ret = smu_cmn_get_metrics_table(smu, &metrics, false); 678 if (ret) 679 return ret; 680 681 smu_cmn_get_sysfs_buf(&buf, &size); 682 start_offset = size; 683 684 switch (clk_type) { 685 case SMU_OD_SCLK: 686 size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); 687 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 688 (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); 689 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 690 (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); 691 break; 692 case SMU_OD_CCLK: 693 size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); 694 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 695 (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); 696 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 697 (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); 698 break; 699 case SMU_OD_RANGE: 700 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 701 size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", 702 smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); 703 size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", 704 smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); 705 break; 706 case SMU_SOCCLK: 707 /* the level 3 ~ 6 of socclk use the same frequency for vangogh */ 708 count = clk_table->NumSocClkLevelsEnabled; 709 cur_value = metrics.Current.SocclkFrequency; 710 break; 711 case SMU_VCLK: 712 count = clk_table->VcnClkLevelsEnabled; 713 cur_value = metrics.Current.VclkFrequency; 714 break; 715 case SMU_DCLK: 716 count = clk_table->VcnClkLevelsEnabled; 717 cur_value = metrics.Current.DclkFrequency; 718 break; 719 case SMU_MCLK: 720 count = clk_table->NumDfPstatesEnabled; 721 cur_value = metrics.Current.MemclkFrequency; 722 break; 723 case SMU_FCLK: 724 count = clk_table->NumDfPstatesEnabled; 725 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value); 726 if (ret) 727 return ret; 728 break; 729 case SMU_GFXCLK: 730 case SMU_SCLK: 731 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value); 732 if (ret) { 733 return ret; 734 } 735 break; 736 default: 737 break; 738 } 739 740 switch (clk_type) { 741 case SMU_SOCCLK: 742 case SMU_VCLK: 743 case SMU_DCLK: 744 case SMU_MCLK: 745 case SMU_FCLK: 746 for (i = 0; i < count; i++) { 747 idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; 748 ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); 749 if (ret) 750 return ret; 751 if (!value) 752 continue; 753 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value, 754 cur_value == value ? "*" : ""); 755 if (cur_value == value) 756 cur_value_match_level = true; 757 } 758 759 if (!cur_value_match_level) 760 size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value); 761 break; 762 case SMU_GFXCLK: 763 case SMU_SCLK: 764 min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; 765 max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; 766 if (cur_value == max) 767 i = 2; 768 else if (cur_value == min) 769 i = 0; 770 else 771 i = 1; 772 size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, 773 i == 0 ? "*" : ""); 774 size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", 775 i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, 776 i == 1 ? "*" : ""); 777 size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, 778 i == 2 ? "*" : ""); 779 break; 780 default: 781 break; 782 } 783 784 return size - start_offset; 785 } 786 787 static int vangogh_common_print_clk_levels(struct smu_context *smu, 788 enum smu_clk_type clk_type, char *buf) 789 { 790 int ret = 0; 791 792 if (smu->smc_fw_if_version < 0x3) 793 ret = vangogh_print_legacy_clk_levels(smu, clk_type, buf); 794 else 795 ret = vangogh_print_clk_levels(smu, clk_type, buf); 796 797 return ret; 798 } 799 800 static int vangogh_get_profiling_clk_mask(struct smu_context *smu, 801 enum amd_dpm_forced_level level, 802 uint32_t *vclk_mask, 803 uint32_t *dclk_mask, 804 uint32_t *mclk_mask, 805 uint32_t *fclk_mask, 806 uint32_t *soc_mask) 807 { 808 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 809 810 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { 811 if (mclk_mask) 812 *mclk_mask = clk_table->NumDfPstatesEnabled - 1; 813 814 if (fclk_mask) 815 *fclk_mask = clk_table->NumDfPstatesEnabled - 1; 816 817 if (soc_mask) 818 *soc_mask = 0; 819 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 820 if (mclk_mask) 821 *mclk_mask = 0; 822 823 if (fclk_mask) 824 *fclk_mask = 0; 825 826 if (soc_mask) 827 *soc_mask = 1; 828 829 if (vclk_mask) 830 *vclk_mask = 1; 831 832 if (dclk_mask) 833 *dclk_mask = 1; 834 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) { 835 if (mclk_mask) 836 *mclk_mask = 0; 837 838 if (fclk_mask) 839 *fclk_mask = 0; 840 841 if (soc_mask) 842 *soc_mask = 1; 843 844 if (vclk_mask) 845 *vclk_mask = 1; 846 847 if (dclk_mask) 848 *dclk_mask = 1; 849 } 850 851 return 0; 852 } 853 854 static bool vangogh_clk_dpm_is_enabled(struct smu_context *smu, 855 enum smu_clk_type clk_type) 856 { 857 enum smu_feature_mask feature_id = 0; 858 859 switch (clk_type) { 860 case SMU_MCLK: 861 case SMU_UCLK: 862 case SMU_FCLK: 863 feature_id = SMU_FEATURE_DPM_FCLK_BIT; 864 break; 865 case SMU_GFXCLK: 866 case SMU_SCLK: 867 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT; 868 break; 869 case SMU_SOCCLK: 870 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT; 871 break; 872 case SMU_VCLK: 873 case SMU_DCLK: 874 feature_id = SMU_FEATURE_VCN_DPM_BIT; 875 break; 876 default: 877 return true; 878 } 879 880 if (!smu_cmn_feature_is_enabled(smu, feature_id)) 881 return false; 882 883 return true; 884 } 885 886 static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu, 887 enum smu_clk_type clk_type, 888 uint32_t *min, 889 uint32_t *max) 890 { 891 int ret = 0; 892 uint32_t soc_mask; 893 uint32_t vclk_mask; 894 uint32_t dclk_mask; 895 uint32_t mclk_mask; 896 uint32_t fclk_mask; 897 uint32_t clock_limit; 898 899 if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) { 900 switch (clk_type) { 901 case SMU_MCLK: 902 case SMU_UCLK: 903 clock_limit = smu->smu_table.boot_values.uclk; 904 break; 905 case SMU_FCLK: 906 clock_limit = smu->smu_table.boot_values.fclk; 907 break; 908 case SMU_GFXCLK: 909 case SMU_SCLK: 910 clock_limit = smu->smu_table.boot_values.gfxclk; 911 break; 912 case SMU_SOCCLK: 913 clock_limit = smu->smu_table.boot_values.socclk; 914 break; 915 case SMU_VCLK: 916 clock_limit = smu->smu_table.boot_values.vclk; 917 break; 918 case SMU_DCLK: 919 clock_limit = smu->smu_table.boot_values.dclk; 920 break; 921 default: 922 clock_limit = 0; 923 break; 924 } 925 926 /* clock in Mhz unit */ 927 if (min) 928 *min = clock_limit / 100; 929 if (max) 930 *max = clock_limit / 100; 931 932 return 0; 933 } 934 if (max) { 935 ret = vangogh_get_profiling_clk_mask(smu, 936 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK, 937 &vclk_mask, 938 &dclk_mask, 939 &mclk_mask, 940 &fclk_mask, 941 &soc_mask); 942 if (ret) 943 goto failed; 944 945 switch (clk_type) { 946 case SMU_UCLK: 947 case SMU_MCLK: 948 ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, max); 949 if (ret) 950 goto failed; 951 break; 952 case SMU_SOCCLK: 953 ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, max); 954 if (ret) 955 goto failed; 956 break; 957 case SMU_FCLK: 958 ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, max); 959 if (ret) 960 goto failed; 961 break; 962 case SMU_VCLK: 963 ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, max); 964 if (ret) 965 goto failed; 966 break; 967 case SMU_DCLK: 968 ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, max); 969 if (ret) 970 goto failed; 971 break; 972 default: 973 ret = -EINVAL; 974 goto failed; 975 } 976 } 977 if (min) { 978 ret = vangogh_get_profiling_clk_mask(smu, 979 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK, 980 NULL, 981 NULL, 982 &mclk_mask, 983 &fclk_mask, 984 &soc_mask); 985 if (ret) 986 goto failed; 987 988 vclk_mask = dclk_mask = 0; 989 990 switch (clk_type) { 991 case SMU_UCLK: 992 case SMU_MCLK: 993 ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, min); 994 if (ret) 995 goto failed; 996 break; 997 case SMU_SOCCLK: 998 ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, min); 999 if (ret) 1000 goto failed; 1001 break; 1002 case SMU_FCLK: 1003 ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, min); 1004 if (ret) 1005 goto failed; 1006 break; 1007 case SMU_VCLK: 1008 ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, min); 1009 if (ret) 1010 goto failed; 1011 break; 1012 case SMU_DCLK: 1013 ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, min); 1014 if (ret) 1015 goto failed; 1016 break; 1017 default: 1018 ret = -EINVAL; 1019 goto failed; 1020 } 1021 } 1022 failed: 1023 return ret; 1024 } 1025 1026 static int vangogh_get_power_profile_mode(struct smu_context *smu, 1027 char *buf) 1028 { 1029 uint32_t i, size = 0; 1030 int16_t workload_type = 0; 1031 1032 if (!buf) 1033 return -EINVAL; 1034 1035 for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) { 1036 /* 1037 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT 1038 * Not all profile modes are supported on vangogh. 1039 */ 1040 workload_type = smu_cmn_to_asic_specific_index(smu, 1041 CMN2ASIC_MAPPING_WORKLOAD, 1042 i); 1043 1044 if (workload_type < 0) 1045 continue; 1046 1047 size += sysfs_emit_at(buf, size, "%2d %14s%s\n", 1048 i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); 1049 } 1050 1051 return size; 1052 } 1053 1054 static int vangogh_set_power_profile_mode(struct smu_context *smu, 1055 u32 workload_mask, 1056 long *custom_params, 1057 u32 custom_params_max_idx) 1058 { 1059 u32 backend_workload_mask = 0; 1060 int ret; 1061 1062 smu_cmn_get_backend_workload_mask(smu, workload_mask, 1063 &backend_workload_mask); 1064 1065 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, 1066 backend_workload_mask, 1067 NULL); 1068 if (ret) { 1069 dev_err_once(smu->adev->dev, "Fail to set workload mask 0x%08x\n", 1070 workload_mask); 1071 return ret; 1072 } 1073 1074 return ret; 1075 } 1076 1077 static int vangogh_set_soft_freq_limited_range(struct smu_context *smu, 1078 enum smu_clk_type clk_type, 1079 uint32_t min, 1080 uint32_t max, 1081 bool automatic) 1082 { 1083 int ret = 0; 1084 1085 if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) 1086 return 0; 1087 1088 switch (clk_type) { 1089 case SMU_GFXCLK: 1090 case SMU_SCLK: 1091 ret = smu_cmn_send_smc_msg_with_param(smu, 1092 SMU_MSG_SetHardMinGfxClk, 1093 min, NULL); 1094 if (ret) 1095 return ret; 1096 1097 ret = smu_cmn_send_smc_msg_with_param(smu, 1098 SMU_MSG_SetSoftMaxGfxClk, 1099 max, NULL); 1100 if (ret) 1101 return ret; 1102 break; 1103 case SMU_FCLK: 1104 ret = smu_cmn_send_smc_msg_with_param(smu, 1105 SMU_MSG_SetHardMinFclkByFreq, 1106 min, NULL); 1107 if (ret) 1108 return ret; 1109 1110 ret = smu_cmn_send_smc_msg_with_param(smu, 1111 SMU_MSG_SetSoftMaxFclkByFreq, 1112 max, NULL); 1113 if (ret) 1114 return ret; 1115 break; 1116 case SMU_SOCCLK: 1117 ret = smu_cmn_send_smc_msg_with_param(smu, 1118 SMU_MSG_SetHardMinSocclkByFreq, 1119 min, NULL); 1120 if (ret) 1121 return ret; 1122 1123 ret = smu_cmn_send_smc_msg_with_param(smu, 1124 SMU_MSG_SetSoftMaxSocclkByFreq, 1125 max, NULL); 1126 if (ret) 1127 return ret; 1128 break; 1129 case SMU_VCLK: 1130 ret = smu_cmn_send_smc_msg_with_param(smu, 1131 SMU_MSG_SetHardMinVcn, 1132 min << 16, NULL); 1133 if (ret) 1134 return ret; 1135 ret = smu_cmn_send_smc_msg_with_param(smu, 1136 SMU_MSG_SetSoftMaxVcn, 1137 max << 16, NULL); 1138 if (ret) 1139 return ret; 1140 break; 1141 case SMU_DCLK: 1142 ret = smu_cmn_send_smc_msg_with_param(smu, 1143 SMU_MSG_SetHardMinVcn, 1144 min, NULL); 1145 if (ret) 1146 return ret; 1147 ret = smu_cmn_send_smc_msg_with_param(smu, 1148 SMU_MSG_SetSoftMaxVcn, 1149 max, NULL); 1150 if (ret) 1151 return ret; 1152 break; 1153 default: 1154 return -EINVAL; 1155 } 1156 1157 return ret; 1158 } 1159 1160 static int vangogh_force_clk_levels(struct smu_context *smu, 1161 enum smu_clk_type clk_type, uint32_t mask) 1162 { 1163 uint32_t soft_min_level = 0, soft_max_level = 0; 1164 uint32_t min_freq = 0, max_freq = 0; 1165 int ret = 0 ; 1166 1167 soft_min_level = mask ? (ffs(mask) - 1) : 0; 1168 soft_max_level = mask ? (fls(mask) - 1) : 0; 1169 1170 switch (clk_type) { 1171 case SMU_SOCCLK: 1172 ret = vangogh_get_dpm_clk_limited(smu, clk_type, 1173 soft_min_level, &min_freq); 1174 if (ret) 1175 return ret; 1176 ret = vangogh_get_dpm_clk_limited(smu, clk_type, 1177 soft_max_level, &max_freq); 1178 if (ret) 1179 return ret; 1180 ret = smu_cmn_send_smc_msg_with_param(smu, 1181 SMU_MSG_SetSoftMaxSocclkByFreq, 1182 max_freq, NULL); 1183 if (ret) 1184 return ret; 1185 ret = smu_cmn_send_smc_msg_with_param(smu, 1186 SMU_MSG_SetHardMinSocclkByFreq, 1187 min_freq, NULL); 1188 if (ret) 1189 return ret; 1190 break; 1191 case SMU_FCLK: 1192 ret = vangogh_get_dpm_clk_limited(smu, 1193 clk_type, soft_min_level, &min_freq); 1194 if (ret) 1195 return ret; 1196 ret = vangogh_get_dpm_clk_limited(smu, 1197 clk_type, soft_max_level, &max_freq); 1198 if (ret) 1199 return ret; 1200 ret = smu_cmn_send_smc_msg_with_param(smu, 1201 SMU_MSG_SetSoftMaxFclkByFreq, 1202 max_freq, NULL); 1203 if (ret) 1204 return ret; 1205 ret = smu_cmn_send_smc_msg_with_param(smu, 1206 SMU_MSG_SetHardMinFclkByFreq, 1207 min_freq, NULL); 1208 if (ret) 1209 return ret; 1210 break; 1211 case SMU_VCLK: 1212 ret = vangogh_get_dpm_clk_limited(smu, 1213 clk_type, soft_min_level, &min_freq); 1214 if (ret) 1215 return ret; 1216 1217 ret = vangogh_get_dpm_clk_limited(smu, 1218 clk_type, soft_max_level, &max_freq); 1219 if (ret) 1220 return ret; 1221 1222 1223 ret = smu_cmn_send_smc_msg_with_param(smu, 1224 SMU_MSG_SetHardMinVcn, 1225 min_freq << 16, NULL); 1226 if (ret) 1227 return ret; 1228 1229 ret = smu_cmn_send_smc_msg_with_param(smu, 1230 SMU_MSG_SetSoftMaxVcn, 1231 max_freq << 16, NULL); 1232 if (ret) 1233 return ret; 1234 1235 break; 1236 case SMU_DCLK: 1237 ret = vangogh_get_dpm_clk_limited(smu, 1238 clk_type, soft_min_level, &min_freq); 1239 if (ret) 1240 return ret; 1241 1242 ret = vangogh_get_dpm_clk_limited(smu, 1243 clk_type, soft_max_level, &max_freq); 1244 if (ret) 1245 return ret; 1246 1247 ret = smu_cmn_send_smc_msg_with_param(smu, 1248 SMU_MSG_SetHardMinVcn, 1249 min_freq, NULL); 1250 if (ret) 1251 return ret; 1252 1253 ret = smu_cmn_send_smc_msg_with_param(smu, 1254 SMU_MSG_SetSoftMaxVcn, 1255 max_freq, NULL); 1256 if (ret) 1257 return ret; 1258 1259 break; 1260 default: 1261 break; 1262 } 1263 1264 return ret; 1265 } 1266 1267 static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest) 1268 { 1269 int ret = 0, i = 0; 1270 uint32_t min_freq, max_freq, force_freq; 1271 enum smu_clk_type clk_type; 1272 1273 enum smu_clk_type clks[] = { 1274 SMU_SOCCLK, 1275 SMU_VCLK, 1276 SMU_DCLK, 1277 SMU_FCLK, 1278 }; 1279 1280 for (i = 0; i < ARRAY_SIZE(clks); i++) { 1281 clk_type = clks[i]; 1282 ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq); 1283 if (ret) 1284 return ret; 1285 1286 force_freq = highest ? max_freq : min_freq; 1287 ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq, false); 1288 if (ret) 1289 return ret; 1290 } 1291 1292 return ret; 1293 } 1294 1295 static int vangogh_unforce_dpm_levels(struct smu_context *smu) 1296 { 1297 int ret = 0, i = 0; 1298 uint32_t min_freq, max_freq; 1299 enum smu_clk_type clk_type; 1300 1301 struct clk_feature_map { 1302 enum smu_clk_type clk_type; 1303 uint32_t feature; 1304 } clk_feature_map[] = { 1305 {SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT}, 1306 {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT}, 1307 {SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT}, 1308 {SMU_DCLK, SMU_FEATURE_VCN_DPM_BIT}, 1309 }; 1310 1311 for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) { 1312 1313 if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature)) 1314 continue; 1315 1316 clk_type = clk_feature_map[i].clk_type; 1317 1318 ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq); 1319 1320 if (ret) 1321 return ret; 1322 1323 ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); 1324 1325 if (ret) 1326 return ret; 1327 } 1328 1329 return ret; 1330 } 1331 1332 static int vangogh_set_peak_clock_by_device(struct smu_context *smu) 1333 { 1334 int ret = 0; 1335 uint32_t socclk_freq = 0, fclk_freq = 0; 1336 uint32_t vclk_freq = 0, dclk_freq = 0; 1337 1338 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_freq); 1339 if (ret) 1340 return ret; 1341 1342 ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq, false); 1343 if (ret) 1344 return ret; 1345 1346 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_freq); 1347 if (ret) 1348 return ret; 1349 1350 ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq, false); 1351 if (ret) 1352 return ret; 1353 1354 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_freq); 1355 if (ret) 1356 return ret; 1357 1358 ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq, false); 1359 if (ret) 1360 return ret; 1361 1362 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_freq); 1363 if (ret) 1364 return ret; 1365 1366 ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq, false); 1367 if (ret) 1368 return ret; 1369 1370 return ret; 1371 } 1372 1373 static int vangogh_set_performance_level(struct smu_context *smu, 1374 enum amd_dpm_forced_level level) 1375 { 1376 int ret = 0, i; 1377 uint32_t soc_mask, mclk_mask, fclk_mask; 1378 uint32_t vclk_mask = 0, dclk_mask = 0; 1379 1380 smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; 1381 smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; 1382 1383 switch (level) { 1384 case AMD_DPM_FORCED_LEVEL_HIGH: 1385 smu->gfx_actual_hard_min_freq = smu->gfx_default_soft_max_freq; 1386 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 1387 1388 1389 ret = vangogh_force_dpm_limit_value(smu, true); 1390 if (ret) 1391 return ret; 1392 break; 1393 case AMD_DPM_FORCED_LEVEL_LOW: 1394 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1395 smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq; 1396 1397 ret = vangogh_force_dpm_limit_value(smu, false); 1398 if (ret) 1399 return ret; 1400 break; 1401 case AMD_DPM_FORCED_LEVEL_AUTO: 1402 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1403 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 1404 1405 ret = vangogh_unforce_dpm_levels(smu); 1406 if (ret) 1407 return ret; 1408 break; 1409 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 1410 smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK; 1411 smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK; 1412 1413 ret = vangogh_get_profiling_clk_mask(smu, level, 1414 &vclk_mask, 1415 &dclk_mask, 1416 &mclk_mask, 1417 &fclk_mask, 1418 &soc_mask); 1419 if (ret) 1420 return ret; 1421 1422 vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); 1423 vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); 1424 vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask); 1425 vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask); 1426 break; 1427 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 1428 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1429 smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq; 1430 break; 1431 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 1432 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1433 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 1434 1435 ret = vangogh_get_profiling_clk_mask(smu, level, 1436 NULL, 1437 NULL, 1438 &mclk_mask, 1439 &fclk_mask, 1440 NULL); 1441 if (ret) 1442 return ret; 1443 1444 vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); 1445 break; 1446 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1447 smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK; 1448 smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK; 1449 1450 ret = vangogh_set_peak_clock_by_device(smu); 1451 if (ret) 1452 return ret; 1453 break; 1454 case AMD_DPM_FORCED_LEVEL_MANUAL: 1455 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 1456 default: 1457 return 0; 1458 } 1459 1460 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, 1461 smu->gfx_actual_hard_min_freq, NULL); 1462 if (ret) 1463 return ret; 1464 1465 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, 1466 smu->gfx_actual_soft_max_freq, NULL); 1467 if (ret) 1468 return ret; 1469 1470 if (smu->adev->pm.fw_version >= 0x43f1b00) { 1471 for (i = 0; i < smu->cpu_core_num; i++) { 1472 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk, 1473 ((i << 20) 1474 | smu->cpu_actual_soft_min_freq), 1475 NULL); 1476 if (ret) 1477 return ret; 1478 1479 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk, 1480 ((i << 20) 1481 | smu->cpu_actual_soft_max_freq), 1482 NULL); 1483 if (ret) 1484 return ret; 1485 } 1486 } 1487 1488 return ret; 1489 } 1490 1491 static int vangogh_read_sensor(struct smu_context *smu, 1492 enum amd_pp_sensors sensor, 1493 void *data, uint32_t *size) 1494 { 1495 int ret = 0; 1496 1497 if (!data || !size) 1498 return -EINVAL; 1499 1500 switch (sensor) { 1501 case AMDGPU_PP_SENSOR_GPU_LOAD: 1502 ret = vangogh_common_get_smu_metrics_data(smu, 1503 METRICS_AVERAGE_GFXACTIVITY, 1504 (uint32_t *)data); 1505 *size = 4; 1506 break; 1507 case AMDGPU_PP_SENSOR_VCN_LOAD: 1508 ret = vangogh_common_get_smu_metrics_data(smu, 1509 METRICS_AVERAGE_VCNACTIVITY, 1510 (uint32_t *)data); 1511 *size = 4; 1512 break; 1513 case AMDGPU_PP_SENSOR_GPU_AVG_POWER: 1514 ret = vangogh_common_get_smu_metrics_data(smu, 1515 METRICS_AVERAGE_SOCKETPOWER, 1516 (uint32_t *)data); 1517 *size = 4; 1518 break; 1519 case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: 1520 ret = vangogh_common_get_smu_metrics_data(smu, 1521 METRICS_CURR_SOCKETPOWER, 1522 (uint32_t *)data); 1523 *size = 4; 1524 break; 1525 case AMDGPU_PP_SENSOR_EDGE_TEMP: 1526 ret = vangogh_common_get_smu_metrics_data(smu, 1527 METRICS_TEMPERATURE_EDGE, 1528 (uint32_t *)data); 1529 *size = 4; 1530 break; 1531 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 1532 ret = vangogh_common_get_smu_metrics_data(smu, 1533 METRICS_TEMPERATURE_HOTSPOT, 1534 (uint32_t *)data); 1535 *size = 4; 1536 break; 1537 case AMDGPU_PP_SENSOR_GFX_MCLK: 1538 ret = vangogh_common_get_smu_metrics_data(smu, 1539 METRICS_CURR_UCLK, 1540 (uint32_t *)data); 1541 *(uint32_t *)data *= 100; 1542 *size = 4; 1543 break; 1544 case AMDGPU_PP_SENSOR_GFX_SCLK: 1545 ret = vangogh_common_get_smu_metrics_data(smu, 1546 METRICS_CURR_GFXCLK, 1547 (uint32_t *)data); 1548 *(uint32_t *)data *= 100; 1549 *size = 4; 1550 break; 1551 case AMDGPU_PP_SENSOR_VDDGFX: 1552 ret = vangogh_common_get_smu_metrics_data(smu, 1553 METRICS_VOLTAGE_VDDGFX, 1554 (uint32_t *)data); 1555 *size = 4; 1556 break; 1557 case AMDGPU_PP_SENSOR_VDDNB: 1558 ret = vangogh_common_get_smu_metrics_data(smu, 1559 METRICS_VOLTAGE_VDDSOC, 1560 (uint32_t *)data); 1561 *size = 4; 1562 break; 1563 case AMDGPU_PP_SENSOR_CPU_CLK: 1564 ret = vangogh_common_get_smu_metrics_data(smu, 1565 METRICS_AVERAGE_CPUCLK, 1566 (uint32_t *)data); 1567 *size = smu->cpu_core_num * sizeof(uint16_t); 1568 break; 1569 default: 1570 ret = -EOPNOTSUPP; 1571 break; 1572 } 1573 1574 return ret; 1575 } 1576 1577 static int vangogh_get_apu_thermal_limit(struct smu_context *smu, uint32_t *limit) 1578 { 1579 return smu_cmn_send_smc_msg_with_param(smu, 1580 SMU_MSG_GetThermalLimit, 1581 0, limit); 1582 } 1583 1584 static int vangogh_set_apu_thermal_limit(struct smu_context *smu, uint32_t limit) 1585 { 1586 return smu_cmn_send_smc_msg_with_param(smu, 1587 SMU_MSG_SetReducedThermalLimit, 1588 limit, NULL); 1589 } 1590 1591 1592 static int vangogh_set_watermarks_table(struct smu_context *smu, 1593 struct pp_smu_wm_range_sets *clock_ranges) 1594 { 1595 int i; 1596 int ret = 0; 1597 Watermarks_t *table = smu->smu_table.watermarks_table; 1598 1599 if (!table || !clock_ranges) 1600 return -EINVAL; 1601 1602 if (clock_ranges) { 1603 if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES || 1604 clock_ranges->num_writer_wm_sets > NUM_WM_RANGES) 1605 return -EINVAL; 1606 1607 for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) { 1608 table->WatermarkRow[WM_DCFCLK][i].MinClock = 1609 clock_ranges->reader_wm_sets[i].min_drain_clk_mhz; 1610 table->WatermarkRow[WM_DCFCLK][i].MaxClock = 1611 clock_ranges->reader_wm_sets[i].max_drain_clk_mhz; 1612 table->WatermarkRow[WM_DCFCLK][i].MinMclk = 1613 clock_ranges->reader_wm_sets[i].min_fill_clk_mhz; 1614 table->WatermarkRow[WM_DCFCLK][i].MaxMclk = 1615 clock_ranges->reader_wm_sets[i].max_fill_clk_mhz; 1616 1617 table->WatermarkRow[WM_DCFCLK][i].WmSetting = 1618 clock_ranges->reader_wm_sets[i].wm_inst; 1619 } 1620 1621 for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) { 1622 table->WatermarkRow[WM_SOCCLK][i].MinClock = 1623 clock_ranges->writer_wm_sets[i].min_fill_clk_mhz; 1624 table->WatermarkRow[WM_SOCCLK][i].MaxClock = 1625 clock_ranges->writer_wm_sets[i].max_fill_clk_mhz; 1626 table->WatermarkRow[WM_SOCCLK][i].MinMclk = 1627 clock_ranges->writer_wm_sets[i].min_drain_clk_mhz; 1628 table->WatermarkRow[WM_SOCCLK][i].MaxMclk = 1629 clock_ranges->writer_wm_sets[i].max_drain_clk_mhz; 1630 1631 table->WatermarkRow[WM_SOCCLK][i].WmSetting = 1632 clock_ranges->writer_wm_sets[i].wm_inst; 1633 } 1634 1635 smu->watermarks_bitmap |= WATERMARKS_EXIST; 1636 } 1637 1638 /* pass data to smu controller */ 1639 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && 1640 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { 1641 ret = smu_cmn_write_watermarks_table(smu); 1642 if (ret) { 1643 dev_err(smu->adev->dev, "Failed to update WMTABLE!"); 1644 return ret; 1645 } 1646 smu->watermarks_bitmap |= WATERMARKS_LOADED; 1647 } 1648 1649 return 0; 1650 } 1651 1652 static ssize_t vangogh_get_legacy_gpu_metrics_v2_3(struct smu_context *smu, 1653 void **table) 1654 { 1655 struct smu_table_context *smu_table = &smu->smu_table; 1656 struct gpu_metrics_v2_3 *gpu_metrics = 1657 (struct gpu_metrics_v2_3 *)smu_table->gpu_metrics_table; 1658 SmuMetrics_legacy_t metrics; 1659 int ret = 0; 1660 1661 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 1662 if (ret) 1663 return ret; 1664 1665 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 3); 1666 1667 gpu_metrics->temperature_gfx = metrics.GfxTemperature; 1668 gpu_metrics->temperature_soc = metrics.SocTemperature; 1669 memcpy(&gpu_metrics->temperature_core[0], 1670 &metrics.CoreTemperature[0], 1671 sizeof(uint16_t) * 4); 1672 gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0]; 1673 1674 gpu_metrics->average_gfx_activity = metrics.GfxActivity; 1675 gpu_metrics->average_mm_activity = metrics.UvdActivity; 1676 1677 gpu_metrics->average_socket_power = metrics.CurrentSocketPower; 1678 gpu_metrics->average_cpu_power = metrics.Power[0]; 1679 gpu_metrics->average_soc_power = metrics.Power[1]; 1680 gpu_metrics->average_gfx_power = metrics.Power[2]; 1681 memcpy(&gpu_metrics->average_core_power[0], 1682 &metrics.CorePower[0], 1683 sizeof(uint16_t) * 4); 1684 1685 gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency; 1686 gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency; 1687 gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency; 1688 gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency; 1689 gpu_metrics->average_vclk_frequency = metrics.VclkFrequency; 1690 gpu_metrics->average_dclk_frequency = metrics.DclkFrequency; 1691 1692 memcpy(&gpu_metrics->current_coreclk[0], 1693 &metrics.CoreFrequency[0], 1694 sizeof(uint16_t) * 4); 1695 gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0]; 1696 1697 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 1698 gpu_metrics->indep_throttle_status = 1699 smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus, 1700 vangogh_throttler_map); 1701 1702 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1703 1704 *table = (void *)gpu_metrics; 1705 1706 return sizeof(struct gpu_metrics_v2_3); 1707 } 1708 1709 static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu, 1710 void **table) 1711 { 1712 struct smu_table_context *smu_table = &smu->smu_table; 1713 struct gpu_metrics_v2_2 *gpu_metrics = 1714 (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table; 1715 SmuMetrics_legacy_t metrics; 1716 int ret = 0; 1717 1718 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 1719 if (ret) 1720 return ret; 1721 1722 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2); 1723 1724 gpu_metrics->temperature_gfx = metrics.GfxTemperature; 1725 gpu_metrics->temperature_soc = metrics.SocTemperature; 1726 memcpy(&gpu_metrics->temperature_core[0], 1727 &metrics.CoreTemperature[0], 1728 sizeof(uint16_t) * 4); 1729 gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0]; 1730 1731 gpu_metrics->average_gfx_activity = metrics.GfxActivity; 1732 gpu_metrics->average_mm_activity = metrics.UvdActivity; 1733 1734 gpu_metrics->average_socket_power = metrics.CurrentSocketPower; 1735 gpu_metrics->average_cpu_power = metrics.Power[0]; 1736 gpu_metrics->average_soc_power = metrics.Power[1]; 1737 gpu_metrics->average_gfx_power = metrics.Power[2]; 1738 memcpy(&gpu_metrics->average_core_power[0], 1739 &metrics.CorePower[0], 1740 sizeof(uint16_t) * 4); 1741 1742 gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency; 1743 gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency; 1744 gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency; 1745 gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency; 1746 gpu_metrics->average_vclk_frequency = metrics.VclkFrequency; 1747 gpu_metrics->average_dclk_frequency = metrics.DclkFrequency; 1748 1749 memcpy(&gpu_metrics->current_coreclk[0], 1750 &metrics.CoreFrequency[0], 1751 sizeof(uint16_t) * 4); 1752 gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0]; 1753 1754 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 1755 gpu_metrics->indep_throttle_status = 1756 smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus, 1757 vangogh_throttler_map); 1758 1759 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1760 1761 *table = (void *)gpu_metrics; 1762 1763 return sizeof(struct gpu_metrics_v2_2); 1764 } 1765 1766 static ssize_t vangogh_get_gpu_metrics_v2_3(struct smu_context *smu, 1767 void **table) 1768 { 1769 struct smu_table_context *smu_table = &smu->smu_table; 1770 struct gpu_metrics_v2_3 *gpu_metrics = 1771 (struct gpu_metrics_v2_3 *)smu_table->gpu_metrics_table; 1772 SmuMetrics_t metrics; 1773 int ret = 0; 1774 1775 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 1776 if (ret) 1777 return ret; 1778 1779 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 3); 1780 1781 gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature; 1782 gpu_metrics->temperature_soc = metrics.Current.SocTemperature; 1783 memcpy(&gpu_metrics->temperature_core[0], 1784 &metrics.Current.CoreTemperature[0], 1785 sizeof(uint16_t) * 4); 1786 gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0]; 1787 1788 gpu_metrics->average_temperature_gfx = metrics.Average.GfxTemperature; 1789 gpu_metrics->average_temperature_soc = metrics.Average.SocTemperature; 1790 memcpy(&gpu_metrics->average_temperature_core[0], 1791 &metrics.Average.CoreTemperature[0], 1792 sizeof(uint16_t) * 4); 1793 gpu_metrics->average_temperature_l3[0] = metrics.Average.L3Temperature[0]; 1794 1795 gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity; 1796 gpu_metrics->average_mm_activity = metrics.Current.UvdActivity; 1797 1798 gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower; 1799 gpu_metrics->average_cpu_power = metrics.Current.Power[0]; 1800 gpu_metrics->average_soc_power = metrics.Current.Power[1]; 1801 gpu_metrics->average_gfx_power = metrics.Current.Power[2]; 1802 memcpy(&gpu_metrics->average_core_power[0], 1803 &metrics.Average.CorePower[0], 1804 sizeof(uint16_t) * 4); 1805 1806 gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency; 1807 gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency; 1808 gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency; 1809 gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency; 1810 gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency; 1811 gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency; 1812 1813 gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency; 1814 gpu_metrics->current_socclk = metrics.Current.SocclkFrequency; 1815 gpu_metrics->current_uclk = metrics.Current.MemclkFrequency; 1816 gpu_metrics->current_fclk = metrics.Current.MemclkFrequency; 1817 gpu_metrics->current_vclk = metrics.Current.VclkFrequency; 1818 gpu_metrics->current_dclk = metrics.Current.DclkFrequency; 1819 1820 memcpy(&gpu_metrics->current_coreclk[0], 1821 &metrics.Current.CoreFrequency[0], 1822 sizeof(uint16_t) * 4); 1823 gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0]; 1824 1825 gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus; 1826 gpu_metrics->indep_throttle_status = 1827 smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus, 1828 vangogh_throttler_map); 1829 1830 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1831 1832 *table = (void *)gpu_metrics; 1833 1834 return sizeof(struct gpu_metrics_v2_3); 1835 } 1836 1837 static ssize_t vangogh_get_gpu_metrics_v2_4(struct smu_context *smu, 1838 void **table) 1839 { 1840 SmuMetrics_t metrics; 1841 struct smu_table_context *smu_table = &smu->smu_table; 1842 struct gpu_metrics_v2_4 *gpu_metrics = 1843 (struct gpu_metrics_v2_4 *)smu_table->gpu_metrics_table; 1844 int ret = 0; 1845 1846 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 1847 if (ret) 1848 return ret; 1849 1850 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 4); 1851 1852 gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature; 1853 gpu_metrics->temperature_soc = metrics.Current.SocTemperature; 1854 memcpy(&gpu_metrics->temperature_core[0], 1855 &metrics.Current.CoreTemperature[0], 1856 sizeof(uint16_t) * 4); 1857 gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0]; 1858 1859 gpu_metrics->average_temperature_gfx = metrics.Average.GfxTemperature; 1860 gpu_metrics->average_temperature_soc = metrics.Average.SocTemperature; 1861 memcpy(&gpu_metrics->average_temperature_core[0], 1862 &metrics.Average.CoreTemperature[0], 1863 sizeof(uint16_t) * 4); 1864 gpu_metrics->average_temperature_l3[0] = metrics.Average.L3Temperature[0]; 1865 1866 gpu_metrics->average_gfx_activity = metrics.Average.GfxActivity; 1867 gpu_metrics->average_mm_activity = metrics.Average.UvdActivity; 1868 1869 gpu_metrics->average_socket_power = metrics.Average.CurrentSocketPower; 1870 gpu_metrics->average_cpu_power = metrics.Average.Power[0]; 1871 gpu_metrics->average_soc_power = metrics.Average.Power[1]; 1872 gpu_metrics->average_gfx_power = metrics.Average.Power[2]; 1873 1874 gpu_metrics->average_cpu_voltage = metrics.Average.Voltage[0]; 1875 gpu_metrics->average_soc_voltage = metrics.Average.Voltage[1]; 1876 gpu_metrics->average_gfx_voltage = metrics.Average.Voltage[2]; 1877 1878 gpu_metrics->average_cpu_current = metrics.Average.Current[0]; 1879 gpu_metrics->average_soc_current = metrics.Average.Current[1]; 1880 gpu_metrics->average_gfx_current = metrics.Average.Current[2]; 1881 1882 memcpy(&gpu_metrics->average_core_power[0], 1883 &metrics.Average.CorePower[0], 1884 sizeof(uint16_t) * 4); 1885 1886 gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency; 1887 gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency; 1888 gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency; 1889 gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency; 1890 gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency; 1891 gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency; 1892 1893 gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency; 1894 gpu_metrics->current_socclk = metrics.Current.SocclkFrequency; 1895 gpu_metrics->current_uclk = metrics.Current.MemclkFrequency; 1896 gpu_metrics->current_fclk = metrics.Current.MemclkFrequency; 1897 gpu_metrics->current_vclk = metrics.Current.VclkFrequency; 1898 gpu_metrics->current_dclk = metrics.Current.DclkFrequency; 1899 1900 memcpy(&gpu_metrics->current_coreclk[0], 1901 &metrics.Current.CoreFrequency[0], 1902 sizeof(uint16_t) * 4); 1903 gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0]; 1904 1905 gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus; 1906 gpu_metrics->indep_throttle_status = 1907 smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus, 1908 vangogh_throttler_map); 1909 1910 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1911 1912 *table = (void *)gpu_metrics; 1913 1914 return sizeof(struct gpu_metrics_v2_4); 1915 } 1916 1917 static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, 1918 void **table) 1919 { 1920 struct smu_table_context *smu_table = &smu->smu_table; 1921 struct gpu_metrics_v2_2 *gpu_metrics = 1922 (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table; 1923 SmuMetrics_t metrics; 1924 int ret = 0; 1925 1926 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 1927 if (ret) 1928 return ret; 1929 1930 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2); 1931 1932 gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature; 1933 gpu_metrics->temperature_soc = metrics.Current.SocTemperature; 1934 memcpy(&gpu_metrics->temperature_core[0], 1935 &metrics.Current.CoreTemperature[0], 1936 sizeof(uint16_t) * 4); 1937 gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0]; 1938 1939 gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity; 1940 gpu_metrics->average_mm_activity = metrics.Current.UvdActivity; 1941 1942 gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower; 1943 gpu_metrics->average_cpu_power = metrics.Current.Power[0]; 1944 gpu_metrics->average_soc_power = metrics.Current.Power[1]; 1945 gpu_metrics->average_gfx_power = metrics.Current.Power[2]; 1946 memcpy(&gpu_metrics->average_core_power[0], 1947 &metrics.Average.CorePower[0], 1948 sizeof(uint16_t) * 4); 1949 1950 gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency; 1951 gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency; 1952 gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency; 1953 gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency; 1954 gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency; 1955 gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency; 1956 1957 gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency; 1958 gpu_metrics->current_socclk = metrics.Current.SocclkFrequency; 1959 gpu_metrics->current_uclk = metrics.Current.MemclkFrequency; 1960 gpu_metrics->current_fclk = metrics.Current.MemclkFrequency; 1961 gpu_metrics->current_vclk = metrics.Current.VclkFrequency; 1962 gpu_metrics->current_dclk = metrics.Current.DclkFrequency; 1963 1964 memcpy(&gpu_metrics->current_coreclk[0], 1965 &metrics.Current.CoreFrequency[0], 1966 sizeof(uint16_t) * 4); 1967 gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0]; 1968 1969 gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus; 1970 gpu_metrics->indep_throttle_status = 1971 smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus, 1972 vangogh_throttler_map); 1973 1974 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1975 1976 *table = (void *)gpu_metrics; 1977 1978 return sizeof(struct gpu_metrics_v2_2); 1979 } 1980 1981 static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu, 1982 void **table) 1983 { 1984 uint32_t smu_program; 1985 uint32_t fw_version; 1986 int ret = 0; 1987 1988 smu_program = (smu->smc_fw_version >> 24) & 0xff; 1989 fw_version = smu->smc_fw_version & 0xffffff; 1990 if (smu_program == 6) { 1991 if (fw_version >= 0x3F0800) 1992 ret = vangogh_get_gpu_metrics_v2_4(smu, table); 1993 else 1994 ret = vangogh_get_gpu_metrics_v2_3(smu, table); 1995 1996 } else { 1997 if (smu->smc_fw_version >= 0x043F3E00) { 1998 if (smu->smc_fw_if_version < 0x3) 1999 ret = vangogh_get_legacy_gpu_metrics_v2_3(smu, table); 2000 else 2001 ret = vangogh_get_gpu_metrics_v2_3(smu, table); 2002 } else { 2003 if (smu->smc_fw_if_version < 0x3) 2004 ret = vangogh_get_legacy_gpu_metrics(smu, table); 2005 else 2006 ret = vangogh_get_gpu_metrics(smu, table); 2007 } 2008 } 2009 2010 return ret; 2011 } 2012 2013 static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, 2014 long input[], uint32_t size) 2015 { 2016 int ret = 0; 2017 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 2018 2019 if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) { 2020 dev_warn(smu->adev->dev, 2021 "pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n"); 2022 return -EINVAL; 2023 } 2024 2025 switch (type) { 2026 case PP_OD_EDIT_CCLK_VDDC_TABLE: 2027 if (size != 3) { 2028 dev_err(smu->adev->dev, "Input parameter number not correct (should be 4 for processor)\n"); 2029 return -EINVAL; 2030 } 2031 if (input[0] >= smu->cpu_core_num) { 2032 dev_err(smu->adev->dev, "core index is overflow, should be less than %d\n", 2033 smu->cpu_core_num); 2034 } 2035 smu->cpu_core_id_select = input[0]; 2036 if (input[1] == 0) { 2037 if (input[2] < smu->cpu_default_soft_min_freq) { 2038 dev_warn(smu->adev->dev, "Fine grain setting minimum cclk (%ld) MHz is less than the minimum allowed (%d) MHz\n", 2039 input[2], smu->cpu_default_soft_min_freq); 2040 return -EINVAL; 2041 } 2042 smu->cpu_actual_soft_min_freq = input[2]; 2043 } else if (input[1] == 1) { 2044 if (input[2] > smu->cpu_default_soft_max_freq) { 2045 dev_warn(smu->adev->dev, "Fine grain setting maximum cclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n", 2046 input[2], smu->cpu_default_soft_max_freq); 2047 return -EINVAL; 2048 } 2049 smu->cpu_actual_soft_max_freq = input[2]; 2050 } else { 2051 return -EINVAL; 2052 } 2053 break; 2054 case PP_OD_EDIT_SCLK_VDDC_TABLE: 2055 if (size != 2) { 2056 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 2057 return -EINVAL; 2058 } 2059 2060 if (input[0] == 0) { 2061 if (input[1] < smu->gfx_default_hard_min_freq) { 2062 dev_warn(smu->adev->dev, 2063 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n", 2064 input[1], smu->gfx_default_hard_min_freq); 2065 return -EINVAL; 2066 } 2067 smu->gfx_actual_hard_min_freq = input[1]; 2068 } else if (input[0] == 1) { 2069 if (input[1] > smu->gfx_default_soft_max_freq) { 2070 dev_warn(smu->adev->dev, 2071 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n", 2072 input[1], smu->gfx_default_soft_max_freq); 2073 return -EINVAL; 2074 } 2075 smu->gfx_actual_soft_max_freq = input[1]; 2076 } else { 2077 return -EINVAL; 2078 } 2079 break; 2080 case PP_OD_RESTORE_DEFAULT_TABLE: 2081 if (size != 0) { 2082 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 2083 return -EINVAL; 2084 } else { 2085 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 2086 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 2087 smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; 2088 smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; 2089 } 2090 break; 2091 case PP_OD_COMMIT_DPM_TABLE: 2092 if (size != 0) { 2093 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 2094 return -EINVAL; 2095 } else { 2096 if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) { 2097 dev_err(smu->adev->dev, 2098 "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n", 2099 smu->gfx_actual_hard_min_freq, 2100 smu->gfx_actual_soft_max_freq); 2101 return -EINVAL; 2102 } 2103 2104 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, 2105 smu->gfx_actual_hard_min_freq, NULL); 2106 if (ret) { 2107 dev_err(smu->adev->dev, "Set hard min sclk failed!"); 2108 return ret; 2109 } 2110 2111 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, 2112 smu->gfx_actual_soft_max_freq, NULL); 2113 if (ret) { 2114 dev_err(smu->adev->dev, "Set soft max sclk failed!"); 2115 return ret; 2116 } 2117 2118 if (smu->adev->pm.fw_version < 0x43f1b00) { 2119 dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n"); 2120 break; 2121 } 2122 2123 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk, 2124 ((smu->cpu_core_id_select << 20) 2125 | smu->cpu_actual_soft_min_freq), 2126 NULL); 2127 if (ret) { 2128 dev_err(smu->adev->dev, "Set hard min cclk failed!"); 2129 return ret; 2130 } 2131 2132 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk, 2133 ((smu->cpu_core_id_select << 20) 2134 | smu->cpu_actual_soft_max_freq), 2135 NULL); 2136 if (ret) { 2137 dev_err(smu->adev->dev, "Set soft max cclk failed!"); 2138 return ret; 2139 } 2140 } 2141 break; 2142 default: 2143 return -ENOSYS; 2144 } 2145 2146 return ret; 2147 } 2148 2149 static int vangogh_set_default_dpm_tables(struct smu_context *smu) 2150 { 2151 struct smu_table_context *smu_table = &smu->smu_table; 2152 2153 return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); 2154 } 2155 2156 static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) 2157 { 2158 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 2159 2160 smu->gfx_default_hard_min_freq = clk_table->MinGfxClk; 2161 smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk; 2162 smu->gfx_actual_hard_min_freq = 0; 2163 smu->gfx_actual_soft_max_freq = 0; 2164 2165 smu->cpu_default_soft_min_freq = 1400; 2166 smu->cpu_default_soft_max_freq = 3500; 2167 smu->cpu_actual_soft_min_freq = 0; 2168 smu->cpu_actual_soft_max_freq = 0; 2169 2170 return 0; 2171 } 2172 2173 static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table) 2174 { 2175 DpmClocks_t *table = smu->smu_table.clocks_table; 2176 int i; 2177 2178 if (!clock_table || !table) 2179 return -EINVAL; 2180 2181 for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) { 2182 clock_table->SocClocks[i].Freq = table->SocClocks[i]; 2183 clock_table->SocClocks[i].Vol = table->SocVoltage[i]; 2184 } 2185 2186 for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) { 2187 clock_table->FClocks[i].Freq = table->DfPstateTable[i].fclk; 2188 clock_table->FClocks[i].Vol = table->DfPstateTable[i].voltage; 2189 } 2190 2191 for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) { 2192 clock_table->MemClocks[i].Freq = table->DfPstateTable[i].memclk; 2193 clock_table->MemClocks[i].Vol = table->DfPstateTable[i].voltage; 2194 } 2195 2196 return 0; 2197 } 2198 2199 static int vangogh_notify_rlc_state(struct smu_context *smu, bool en) 2200 { 2201 struct amdgpu_device *adev = smu->adev; 2202 int ret = 0; 2203 2204 if (adev->pm.fw_version >= 0x43f1700 && !en) 2205 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify, 2206 RLC_STATUS_OFF, NULL); 2207 2208 return ret; 2209 } 2210 2211 static int vangogh_post_smu_init(struct smu_context *smu) 2212 { 2213 struct amdgpu_device *adev = smu->adev; 2214 uint32_t tmp; 2215 int ret = 0; 2216 uint8_t aon_bits = 0; 2217 /* Two CUs in one WGP */ 2218 uint32_t req_active_wgps = adev->gfx.cu_info.number/2; 2219 uint32_t total_cu = adev->gfx.config.max_cu_per_sh * 2220 adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines; 2221 2222 if (adev->in_s0ix) 2223 return 0; 2224 2225 /* allow message will be sent after enable message on Vangogh*/ 2226 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) && 2227 (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { 2228 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL); 2229 if (ret) { 2230 dev_err(adev->dev, "Failed to Enable GfxOff!\n"); 2231 return ret; 2232 } 2233 } else { 2234 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 2235 dev_info(adev->dev, "If GFX DPM or power gate disabled, disable GFXOFF\n"); 2236 } 2237 2238 /* if all CUs are active, no need to power off any WGPs */ 2239 if (total_cu == adev->gfx.cu_info.number) 2240 return 0; 2241 2242 /* 2243 * Calculate the total bits number of always on WGPs for all SA/SEs in 2244 * RLC_PG_ALWAYS_ON_WGP_MASK. 2245 */ 2246 tmp = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_ALWAYS_ON_WGP_MASK)); 2247 tmp &= RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK_MASK; 2248 2249 aon_bits = hweight32(tmp) * adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines; 2250 2251 /* Do not request any WGPs less than set in the AON_WGP_MASK */ 2252 if (aon_bits > req_active_wgps) { 2253 dev_info(adev->dev, "Number of always on WGPs greater than active WGPs: WGP power save not requested.\n"); 2254 return 0; 2255 } else { 2256 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestActiveWgp, req_active_wgps, NULL); 2257 } 2258 } 2259 2260 static int vangogh_mode_reset(struct smu_context *smu, int type) 2261 { 2262 int ret = 0, index = 0; 2263 2264 index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, 2265 SMU_MSG_GfxDeviceDriverReset); 2266 if (index < 0) 2267 return index == -EACCES ? 0 : index; 2268 2269 mutex_lock(&smu->message_lock); 2270 2271 ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type); 2272 2273 mutex_unlock(&smu->message_lock); 2274 2275 mdelay(10); 2276 2277 return ret; 2278 } 2279 2280 static int vangogh_mode2_reset(struct smu_context *smu) 2281 { 2282 return vangogh_mode_reset(smu, SMU_RESET_MODE_2); 2283 } 2284 2285 /** 2286 * vangogh_get_gfxoff_status - Get gfxoff status 2287 * 2288 * @smu: amdgpu_device pointer 2289 * 2290 * Get current gfxoff status 2291 * 2292 * Return: 2293 * * 0 - GFXOFF (default if enabled). 2294 * * 1 - Transition out of GFX State. 2295 * * 2 - Not in GFXOFF. 2296 * * 3 - Transition into GFXOFF. 2297 */ 2298 static u32 vangogh_get_gfxoff_status(struct smu_context *smu) 2299 { 2300 struct amdgpu_device *adev = smu->adev; 2301 u32 reg, gfxoff_status; 2302 2303 reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL); 2304 gfxoff_status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK) 2305 >> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT; 2306 2307 return gfxoff_status; 2308 } 2309 2310 static int vangogh_get_power_limit(struct smu_context *smu, 2311 uint32_t *current_power_limit, 2312 uint32_t *default_power_limit, 2313 uint32_t *max_power_limit, 2314 uint32_t *min_power_limit) 2315 { 2316 struct smu_11_5_power_context *power_context = smu->smu_power.power_context; 2317 uint32_t ppt_limit; 2318 int ret = 0; 2319 2320 if (smu->adev->pm.fw_version < 0x43f1e00) 2321 return ret; 2322 2323 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSlowPPTLimit, &ppt_limit); 2324 if (ret) { 2325 dev_err(smu->adev->dev, "Get slow PPT limit failed!\n"); 2326 return ret; 2327 } 2328 /* convert from milliwatt to watt */ 2329 if (current_power_limit) 2330 *current_power_limit = ppt_limit / 1000; 2331 if (default_power_limit) 2332 *default_power_limit = ppt_limit / 1000; 2333 if (max_power_limit) 2334 *max_power_limit = 29; 2335 if (min_power_limit) 2336 *min_power_limit = 0; 2337 2338 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit); 2339 if (ret) { 2340 dev_err(smu->adev->dev, "Get fast PPT limit failed!\n"); 2341 return ret; 2342 } 2343 /* convert from milliwatt to watt */ 2344 power_context->current_fast_ppt_limit = 2345 power_context->default_fast_ppt_limit = ppt_limit / 1000; 2346 power_context->max_fast_ppt_limit = 30; 2347 2348 return ret; 2349 } 2350 2351 static int vangogh_get_ppt_limit(struct smu_context *smu, 2352 uint32_t *ppt_limit, 2353 enum smu_ppt_limit_type type, 2354 enum smu_ppt_limit_level level) 2355 { 2356 struct smu_11_5_power_context *power_context = smu->smu_power.power_context; 2357 2358 if (!power_context) 2359 return -EOPNOTSUPP; 2360 2361 if (type == SMU_FAST_PPT_LIMIT) { 2362 switch (level) { 2363 case SMU_PPT_LIMIT_MAX: 2364 *ppt_limit = power_context->max_fast_ppt_limit; 2365 break; 2366 case SMU_PPT_LIMIT_CURRENT: 2367 *ppt_limit = power_context->current_fast_ppt_limit; 2368 break; 2369 case SMU_PPT_LIMIT_DEFAULT: 2370 *ppt_limit = power_context->default_fast_ppt_limit; 2371 break; 2372 default: 2373 break; 2374 } 2375 } 2376 2377 return 0; 2378 } 2379 2380 static int vangogh_set_power_limit(struct smu_context *smu, 2381 enum smu_ppt_limit_type limit_type, 2382 uint32_t ppt_limit) 2383 { 2384 struct smu_11_5_power_context *power_context = 2385 smu->smu_power.power_context; 2386 int ret = 0; 2387 2388 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { 2389 dev_err(smu->adev->dev, "Setting new power limit is not supported!\n"); 2390 return -EOPNOTSUPP; 2391 } 2392 2393 switch (limit_type) { 2394 case SMU_DEFAULT_PPT_LIMIT: 2395 ret = smu_cmn_send_smc_msg_with_param(smu, 2396 SMU_MSG_SetSlowPPTLimit, 2397 ppt_limit * 1000, /* convert from watt to milliwatt */ 2398 NULL); 2399 if (ret) 2400 return ret; 2401 2402 smu->current_power_limit = ppt_limit; 2403 break; 2404 case SMU_FAST_PPT_LIMIT: 2405 if (ppt_limit > power_context->max_fast_ppt_limit) { 2406 dev_err(smu->adev->dev, 2407 "New power limit (%d) is over the max allowed %d\n", 2408 ppt_limit, power_context->max_fast_ppt_limit); 2409 return ret; 2410 } 2411 2412 ret = smu_cmn_send_smc_msg_with_param(smu, 2413 SMU_MSG_SetFastPPTLimit, 2414 ppt_limit * 1000, /* convert from watt to milliwatt */ 2415 NULL); 2416 if (ret) 2417 return ret; 2418 2419 power_context->current_fast_ppt_limit = ppt_limit; 2420 break; 2421 default: 2422 return -EINVAL; 2423 } 2424 2425 return ret; 2426 } 2427 2428 /** 2429 * vangogh_set_gfxoff_residency 2430 * 2431 * @smu: amdgpu_device pointer 2432 * @start: start/stop residency log 2433 * 2434 * This function will be used to log gfxoff residency 2435 * 2436 * 2437 * Returns standard response codes. 2438 */ 2439 static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start) 2440 { 2441 int ret = 0; 2442 u32 residency; 2443 struct amdgpu_device *adev = smu->adev; 2444 2445 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) 2446 return 0; 2447 2448 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LogGfxOffResidency, 2449 start, &residency); 2450 if (ret) 2451 return ret; 2452 2453 if (!start) 2454 adev->gfx.gfx_off_residency = residency; 2455 2456 return ret; 2457 } 2458 2459 /** 2460 * vangogh_get_gfxoff_residency 2461 * 2462 * @smu: amdgpu_device pointer 2463 * @residency: placeholder for return value 2464 * 2465 * This function will be used to get gfxoff residency. 2466 * 2467 * Returns standard response codes. 2468 */ 2469 static u32 vangogh_get_gfxoff_residency(struct smu_context *smu, uint32_t *residency) 2470 { 2471 struct amdgpu_device *adev = smu->adev; 2472 2473 *residency = adev->gfx.gfx_off_residency; 2474 2475 return 0; 2476 } 2477 2478 /** 2479 * vangogh_get_gfxoff_entrycount - get gfxoff entry count 2480 * 2481 * @smu: amdgpu_device pointer 2482 * @entrycount: placeholder for return value 2483 * 2484 * This function will be used to get gfxoff entry count 2485 * 2486 * Returns standard response codes. 2487 */ 2488 static u32 vangogh_get_gfxoff_entrycount(struct smu_context *smu, uint64_t *entrycount) 2489 { 2490 int ret = 0, value = 0; 2491 struct amdgpu_device *adev = smu->adev; 2492 2493 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) 2494 return 0; 2495 2496 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetGfxOffEntryCount, &value); 2497 *entrycount = value + adev->gfx.gfx_off_entrycount; 2498 2499 return ret; 2500 } 2501 2502 static const struct pptable_funcs vangogh_ppt_funcs = { 2503 2504 .check_fw_status = smu_v11_0_check_fw_status, 2505 .check_fw_version = smu_v11_0_check_fw_version, 2506 .init_smc_tables = vangogh_init_smc_tables, 2507 .fini_smc_tables = smu_v11_0_fini_smc_tables, 2508 .init_power = smu_v11_0_init_power, 2509 .fini_power = smu_v11_0_fini_power, 2510 .register_irq_handler = smu_v11_0_register_irq_handler, 2511 .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, 2512 .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, 2513 .send_smc_msg = smu_cmn_send_smc_msg, 2514 .dpm_set_vcn_enable = vangogh_dpm_set_vcn_enable, 2515 .dpm_set_jpeg_enable = vangogh_dpm_set_jpeg_enable, 2516 .is_dpm_running = vangogh_is_dpm_running, 2517 .read_sensor = vangogh_read_sensor, 2518 .get_apu_thermal_limit = vangogh_get_apu_thermal_limit, 2519 .set_apu_thermal_limit = vangogh_set_apu_thermal_limit, 2520 .get_enabled_mask = smu_cmn_get_enabled_mask, 2521 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 2522 .set_watermarks_table = vangogh_set_watermarks_table, 2523 .set_driver_table_location = smu_v11_0_set_driver_table_location, 2524 .interrupt_work = smu_v11_0_interrupt_work, 2525 .get_gpu_metrics = vangogh_common_get_gpu_metrics, 2526 .od_edit_dpm_table = vangogh_od_edit_dpm_table, 2527 .print_clk_levels = vangogh_common_print_clk_levels, 2528 .set_default_dpm_table = vangogh_set_default_dpm_tables, 2529 .set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters, 2530 .notify_rlc_state = vangogh_notify_rlc_state, 2531 .feature_is_enabled = smu_cmn_feature_is_enabled, 2532 .set_power_profile_mode = vangogh_set_power_profile_mode, 2533 .get_power_profile_mode = vangogh_get_power_profile_mode, 2534 .get_dpm_clock_table = vangogh_get_dpm_clock_table, 2535 .force_clk_levels = vangogh_force_clk_levels, 2536 .set_performance_level = vangogh_set_performance_level, 2537 .post_init = vangogh_post_smu_init, 2538 .mode2_reset = vangogh_mode2_reset, 2539 .gfx_off_control = smu_v11_0_gfx_off_control, 2540 .get_gfx_off_status = vangogh_get_gfxoff_status, 2541 .get_gfx_off_entrycount = vangogh_get_gfxoff_entrycount, 2542 .get_gfx_off_residency = vangogh_get_gfxoff_residency, 2543 .set_gfx_off_residency = vangogh_set_gfxoff_residency, 2544 .get_ppt_limit = vangogh_get_ppt_limit, 2545 .get_power_limit = vangogh_get_power_limit, 2546 .set_power_limit = vangogh_set_power_limit, 2547 .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values, 2548 }; 2549 2550 void vangogh_set_ppt_funcs(struct smu_context *smu) 2551 { 2552 smu->ppt_funcs = &vangogh_ppt_funcs; 2553 smu->message_map = vangogh_message_map; 2554 smu->feature_map = vangogh_feature_mask_map; 2555 smu->table_map = vangogh_table_map; 2556 smu->workload_map = vangogh_workload_map; 2557 smu->is_apu = true; 2558 smu_v11_0_set_smu_mailbox_registers(smu); 2559 } 2560