1 /* 2 * Copyright 2020 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #define SWSMU_CODE_LAYER_L2 25 26 #include "amdgpu.h" 27 #include "amdgpu_smu.h" 28 #include "smu_v11_0.h" 29 #include "smu11_driver_if_vangogh.h" 30 #include "vangogh_ppt.h" 31 #include "smu_v11_5_ppsmc.h" 32 #include "smu_v11_5_pmfw.h" 33 #include "smu_cmn.h" 34 #include "soc15_common.h" 35 #include "asic_reg/gc/gc_10_3_0_offset.h" 36 #include "asic_reg/gc/gc_10_3_0_sh_mask.h" 37 #include <asm/processor.h> 38 39 /* 40 * DO NOT use these for err/warn/info/debug messages. 41 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 42 * They are more MGPU friendly. 43 */ 44 #undef pr_err 45 #undef pr_warn 46 #undef pr_info 47 #undef pr_debug 48 49 // Registers related to GFXOFF 50 // addressBlock: smuio_smuio_SmuSmuioDec 51 // base address: 0x5a000 52 #define mmSMUIO_GFX_MISC_CNTL 0x00c5 53 #define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0 54 55 //SMUIO_GFX_MISC_CNTL 56 #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff__SHIFT 0x0 57 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1 58 #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK 0x00000001L 59 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L 60 61 #define FEATURE_MASK(feature) (1ULL << feature) 62 #define SMC_DPM_FEATURE ( \ 63 FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ 64 FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ 65 FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ 66 FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \ 67 FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \ 68 FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \ 69 FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \ 70 FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \ 71 FEATURE_MASK(FEATURE_GFX_DPM_BIT)) 72 73 static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = { 74 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), 75 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0), 76 MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 0), 77 MSG_MAP(EnableGfxOff, PPSMC_MSG_EnableGfxOff, 0), 78 MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0), 79 MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0), 80 MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 0), 81 MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 0), 82 MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0), 83 MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0), 84 MSG_MAP(RlcPowerNotify, PPSMC_MSG_RlcPowerNotify, 0), 85 MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 0), 86 MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 0), 87 MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 0), 88 MSG_MAP(SetHardMinIspiclkByFreq, PPSMC_MSG_SetHardMinIspiclkByFreq, 0), 89 MSG_MAP(SetHardMinIspxclkByFreq, PPSMC_MSG_SetHardMinIspxclkByFreq, 0), 90 MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 0), 91 MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 0), 92 MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0), 93 MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), 94 MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 0), 95 MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 0), 96 MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 0), 97 MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 0), 98 MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 0), 99 MSG_MAP(EnablePostCode, PPSMC_MSG_EnablePostCode, 0), 100 MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency, 0), 101 MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency, 0), 102 MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0), 103 MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 0), 104 MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 0), 105 MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 0), 106 MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 0), 107 MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage, 0), 108 MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0), 109 MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0), 110 MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 0), 111 MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 0), 112 MSG_MAP(PowerUpCvip, PPSMC_MSG_PowerUpCvip, 0), 113 MSG_MAP(PowerDownCvip, PPSMC_MSG_PowerDownCvip, 0), 114 MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), 115 MSG_MAP(GetThermalLimit, PPSMC_MSG_GetThermalLimit, 0), 116 MSG_MAP(GetCurrentTemperature, PPSMC_MSG_GetCurrentTemperature, 0), 117 MSG_MAP(GetCurrentPower, PPSMC_MSG_GetCurrentPower, 0), 118 MSG_MAP(GetCurrentVoltage, PPSMC_MSG_GetCurrentVoltage, 0), 119 MSG_MAP(GetCurrentCurrent, PPSMC_MSG_GetCurrentCurrent, 0), 120 MSG_MAP(GetAverageCpuActivity, PPSMC_MSG_GetAverageCpuActivity, 0), 121 MSG_MAP(GetAverageGfxActivity, PPSMC_MSG_GetAverageGfxActivity, 0), 122 MSG_MAP(GetAveragePower, PPSMC_MSG_GetAveragePower, 0), 123 MSG_MAP(GetAverageTemperature, PPSMC_MSG_GetAverageTemperature, 0), 124 MSG_MAP(SetAveragePowerTimeConstant, PPSMC_MSG_SetAveragePowerTimeConstant, 0), 125 MSG_MAP(SetAverageActivityTimeConstant, PPSMC_MSG_SetAverageActivityTimeConstant, 0), 126 MSG_MAP(SetAverageTemperatureTimeConstant, PPSMC_MSG_SetAverageTemperatureTimeConstant, 0), 127 MSG_MAP(SetMitigationEndHysteresis, PPSMC_MSG_SetMitigationEndHysteresis, 0), 128 MSG_MAP(GetCurrentFreq, PPSMC_MSG_GetCurrentFreq, 0), 129 MSG_MAP(SetReducedPptLimit, PPSMC_MSG_SetReducedPptLimit, 0), 130 MSG_MAP(SetReducedThermalLimit, PPSMC_MSG_SetReducedThermalLimit, 0), 131 MSG_MAP(DramLogSetDramAddr, PPSMC_MSG_DramLogSetDramAddr, 0), 132 MSG_MAP(StartDramLogging, PPSMC_MSG_StartDramLogging, 0), 133 MSG_MAP(StopDramLogging, PPSMC_MSG_StopDramLogging, 0), 134 MSG_MAP(SetSoftMinCclk, PPSMC_MSG_SetSoftMinCclk, 0), 135 MSG_MAP(SetSoftMaxCclk, PPSMC_MSG_SetSoftMaxCclk, 0), 136 MSG_MAP(RequestActiveWgp, PPSMC_MSG_RequestActiveWgp, 0), 137 MSG_MAP(SetFastPPTLimit, PPSMC_MSG_SetFastPPTLimit, 0), 138 MSG_MAP(SetSlowPPTLimit, PPSMC_MSG_SetSlowPPTLimit, 0), 139 MSG_MAP(GetFastPPTLimit, PPSMC_MSG_GetFastPPTLimit, 0), 140 MSG_MAP(GetSlowPPTLimit, PPSMC_MSG_GetSlowPPTLimit, 0), 141 MSG_MAP(GetGfxOffStatus, PPSMC_MSG_GetGfxOffStatus, 0), 142 MSG_MAP(GetGfxOffEntryCount, PPSMC_MSG_GetGfxOffEntryCount, 0), 143 MSG_MAP(LogGfxOffResidency, PPSMC_MSG_LogGfxOffResidency, 0), 144 }; 145 146 static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = { 147 FEA_MAP(PPT), 148 FEA_MAP(TDC), 149 FEA_MAP(THERMAL), 150 FEA_MAP(DS_GFXCLK), 151 FEA_MAP(DS_SOCCLK), 152 FEA_MAP(DS_LCLK), 153 FEA_MAP(DS_FCLK), 154 FEA_MAP(DS_MP1CLK), 155 FEA_MAP(DS_MP0CLK), 156 FEA_MAP(ATHUB_PG), 157 FEA_MAP(CCLK_DPM), 158 FEA_MAP(FAN_CONTROLLER), 159 FEA_MAP(ULV), 160 FEA_MAP(VCN_DPM), 161 FEA_MAP(LCLK_DPM), 162 FEA_MAP(SHUBCLK_DPM), 163 FEA_MAP(DCFCLK_DPM), 164 FEA_MAP(DS_DCFCLK), 165 FEA_MAP(S0I2), 166 FEA_MAP(SMU_LOW_POWER), 167 FEA_MAP(GFX_DEM), 168 FEA_MAP(PSI), 169 FEA_MAP(PROCHOT), 170 FEA_MAP(CPUOFF), 171 FEA_MAP(STAPM), 172 FEA_MAP(S0I3), 173 FEA_MAP(DF_CSTATES), 174 FEA_MAP(PERF_LIMIT), 175 FEA_MAP(CORE_DLDO), 176 FEA_MAP(RSMU_LOW_POWER), 177 FEA_MAP(SMN_LOW_POWER), 178 FEA_MAP(THM_LOW_POWER), 179 FEA_MAP(SMUIO_LOW_POWER), 180 FEA_MAP(MP1_LOW_POWER), 181 FEA_MAP(DS_VCN), 182 FEA_MAP(CPPC), 183 FEA_MAP(OS_CSTATES), 184 FEA_MAP(ISP_DPM), 185 FEA_MAP(A55_DPM), 186 FEA_MAP(CVIP_DSP_DPM), 187 FEA_MAP(MSMU_LOW_POWER), 188 FEA_MAP_REVERSE(SOCCLK), 189 FEA_MAP_REVERSE(FCLK), 190 FEA_MAP_HALF_REVERSE(GFX), 191 }; 192 193 static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = { 194 TAB_MAP_VALID(WATERMARKS), 195 TAB_MAP_VALID(SMU_METRICS), 196 TAB_MAP_VALID(CUSTOM_DPM), 197 TAB_MAP_VALID(DPMCLOCKS), 198 }; 199 200 static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { 201 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), 202 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), 203 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), 204 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), 205 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), 206 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CAPPED, WORKLOAD_PPLIB_CAPPED_BIT), 207 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_UNCAPPED, WORKLOAD_PPLIB_UNCAPPED_BIT), 208 }; 209 210 static const uint8_t vangogh_throttler_map[] = { 211 [THROTTLER_STATUS_BIT_SPL] = (SMU_THROTTLER_SPL_BIT), 212 [THROTTLER_STATUS_BIT_FPPT] = (SMU_THROTTLER_FPPT_BIT), 213 [THROTTLER_STATUS_BIT_SPPT] = (SMU_THROTTLER_SPPT_BIT), 214 [THROTTLER_STATUS_BIT_SPPT_APU] = (SMU_THROTTLER_SPPT_APU_BIT), 215 [THROTTLER_STATUS_BIT_THM_CORE] = (SMU_THROTTLER_TEMP_CORE_BIT), 216 [THROTTLER_STATUS_BIT_THM_GFX] = (SMU_THROTTLER_TEMP_GPU_BIT), 217 [THROTTLER_STATUS_BIT_THM_SOC] = (SMU_THROTTLER_TEMP_SOC_BIT), 218 [THROTTLER_STATUS_BIT_TDC_VDD] = (SMU_THROTTLER_TDC_VDD_BIT), 219 [THROTTLER_STATUS_BIT_TDC_SOC] = (SMU_THROTTLER_TDC_SOC_BIT), 220 [THROTTLER_STATUS_BIT_TDC_GFX] = (SMU_THROTTLER_TDC_GFX_BIT), 221 [THROTTLER_STATUS_BIT_TDC_CVIP] = (SMU_THROTTLER_TDC_CVIP_BIT), 222 }; 223 224 static int vangogh_tables_init(struct smu_context *smu) 225 { 226 struct smu_table_context *smu_table = &smu->smu_table; 227 struct smu_table *tables = smu_table->tables; 228 229 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), 230 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 231 SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t), 232 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 233 SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, 234 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 235 SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t), 236 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 237 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, max(sizeof(SmuMetrics_t), sizeof(SmuMetrics_legacy_t)), 238 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 239 240 smu_table->metrics_table = kzalloc(max(sizeof(SmuMetrics_t), sizeof(SmuMetrics_legacy_t)), GFP_KERNEL); 241 if (!smu_table->metrics_table) 242 goto err0_out; 243 smu_table->metrics_time = 0; 244 245 smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2); 246 smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_3)); 247 smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_4)); 248 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 249 if (!smu_table->gpu_metrics_table) 250 goto err1_out; 251 252 smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); 253 if (!smu_table->watermarks_table) 254 goto err2_out; 255 256 smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL); 257 if (!smu_table->clocks_table) 258 goto err3_out; 259 260 return 0; 261 262 err3_out: 263 kfree(smu_table->watermarks_table); 264 err2_out: 265 kfree(smu_table->gpu_metrics_table); 266 err1_out: 267 kfree(smu_table->metrics_table); 268 err0_out: 269 return -ENOMEM; 270 } 271 272 static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu, 273 MetricsMember_t member, 274 uint32_t *value) 275 { 276 struct smu_table_context *smu_table = &smu->smu_table; 277 SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table; 278 int ret = 0; 279 280 ret = smu_cmn_get_metrics_table(smu, 281 NULL, 282 false); 283 if (ret) 284 return ret; 285 286 switch (member) { 287 case METRICS_CURR_GFXCLK: 288 *value = metrics->GfxclkFrequency; 289 break; 290 case METRICS_AVERAGE_SOCCLK: 291 *value = metrics->SocclkFrequency; 292 break; 293 case METRICS_AVERAGE_VCLK: 294 *value = metrics->VclkFrequency; 295 break; 296 case METRICS_AVERAGE_DCLK: 297 *value = metrics->DclkFrequency; 298 break; 299 case METRICS_CURR_UCLK: 300 *value = metrics->MemclkFrequency; 301 break; 302 case METRICS_AVERAGE_GFXACTIVITY: 303 *value = metrics->GfxActivity / 100; 304 break; 305 case METRICS_AVERAGE_VCNACTIVITY: 306 *value = metrics->UvdActivity / 100; 307 break; 308 case METRICS_AVERAGE_SOCKETPOWER: 309 *value = (metrics->CurrentSocketPower << 8) / 310 1000 ; 311 break; 312 case METRICS_TEMPERATURE_EDGE: 313 *value = metrics->GfxTemperature / 100 * 314 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 315 break; 316 case METRICS_TEMPERATURE_HOTSPOT: 317 *value = metrics->SocTemperature / 100 * 318 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 319 break; 320 case METRICS_THROTTLER_STATUS: 321 *value = metrics->ThrottlerStatus; 322 break; 323 case METRICS_VOLTAGE_VDDGFX: 324 *value = metrics->Voltage[2]; 325 break; 326 case METRICS_VOLTAGE_VDDSOC: 327 *value = metrics->Voltage[1]; 328 break; 329 case METRICS_AVERAGE_CPUCLK: 330 memcpy(value, &metrics->CoreFrequency[0], 331 smu->cpu_core_num * sizeof(uint16_t)); 332 break; 333 default: 334 *value = UINT_MAX; 335 break; 336 } 337 338 return ret; 339 } 340 341 static int vangogh_get_smu_metrics_data(struct smu_context *smu, 342 MetricsMember_t member, 343 uint32_t *value) 344 { 345 struct smu_table_context *smu_table = &smu->smu_table; 346 SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; 347 int ret = 0; 348 349 ret = smu_cmn_get_metrics_table(smu, 350 NULL, 351 false); 352 if (ret) 353 return ret; 354 355 switch (member) { 356 case METRICS_CURR_GFXCLK: 357 *value = metrics->Current.GfxclkFrequency; 358 break; 359 case METRICS_AVERAGE_SOCCLK: 360 *value = metrics->Current.SocclkFrequency; 361 break; 362 case METRICS_AVERAGE_VCLK: 363 *value = metrics->Current.VclkFrequency; 364 break; 365 case METRICS_AVERAGE_DCLK: 366 *value = metrics->Current.DclkFrequency; 367 break; 368 case METRICS_CURR_UCLK: 369 *value = metrics->Current.MemclkFrequency; 370 break; 371 case METRICS_AVERAGE_GFXACTIVITY: 372 *value = metrics->Current.GfxActivity; 373 break; 374 case METRICS_AVERAGE_VCNACTIVITY: 375 *value = metrics->Current.UvdActivity; 376 break; 377 case METRICS_AVERAGE_SOCKETPOWER: 378 *value = (metrics->Average.CurrentSocketPower << 8) / 379 1000; 380 break; 381 case METRICS_CURR_SOCKETPOWER: 382 *value = (metrics->Current.CurrentSocketPower << 8) / 383 1000; 384 break; 385 case METRICS_TEMPERATURE_EDGE: 386 *value = metrics->Current.GfxTemperature / 100 * 387 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 388 break; 389 case METRICS_TEMPERATURE_HOTSPOT: 390 *value = metrics->Current.SocTemperature / 100 * 391 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 392 break; 393 case METRICS_THROTTLER_STATUS: 394 *value = metrics->Current.ThrottlerStatus; 395 break; 396 case METRICS_VOLTAGE_VDDGFX: 397 *value = metrics->Current.Voltage[2]; 398 break; 399 case METRICS_VOLTAGE_VDDSOC: 400 *value = metrics->Current.Voltage[1]; 401 break; 402 case METRICS_AVERAGE_CPUCLK: 403 memcpy(value, &metrics->Current.CoreFrequency[0], 404 smu->cpu_core_num * sizeof(uint16_t)); 405 break; 406 default: 407 *value = UINT_MAX; 408 break; 409 } 410 411 return ret; 412 } 413 414 static int vangogh_common_get_smu_metrics_data(struct smu_context *smu, 415 MetricsMember_t member, 416 uint32_t *value) 417 { 418 int ret = 0; 419 420 if (smu->smc_fw_if_version < 0x3) 421 ret = vangogh_get_legacy_smu_metrics_data(smu, member, value); 422 else 423 ret = vangogh_get_smu_metrics_data(smu, member, value); 424 425 return ret; 426 } 427 428 static int vangogh_allocate_dpm_context(struct smu_context *smu) 429 { 430 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 431 432 smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context), 433 GFP_KERNEL); 434 if (!smu_dpm->dpm_context) 435 return -ENOMEM; 436 437 smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context); 438 439 return 0; 440 } 441 442 static int vangogh_init_smc_tables(struct smu_context *smu) 443 { 444 int ret = 0; 445 446 ret = vangogh_tables_init(smu); 447 if (ret) 448 return ret; 449 450 ret = vangogh_allocate_dpm_context(smu); 451 if (ret) 452 return ret; 453 454 #ifdef CONFIG_X86 455 /* AMD x86 APU only */ 456 smu->cpu_core_num = topology_num_cores_per_package(); 457 #else 458 smu->cpu_core_num = 4; 459 #endif 460 461 return smu_v11_0_init_smc_tables(smu); 462 } 463 464 static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, 465 bool enable, 466 int inst) 467 { 468 int ret = 0; 469 470 if (enable) { 471 /* vcn dpm on is a prerequisite for vcn power gate messages */ 472 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL); 473 if (ret) 474 return ret; 475 } else { 476 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL); 477 if (ret) 478 return ret; 479 } 480 481 return ret; 482 } 483 484 static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) 485 { 486 int ret = 0; 487 488 if (enable) { 489 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL); 490 if (ret) 491 return ret; 492 } else { 493 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL); 494 if (ret) 495 return ret; 496 } 497 498 return ret; 499 } 500 501 static bool vangogh_is_dpm_running(struct smu_context *smu) 502 { 503 struct amdgpu_device *adev = smu->adev; 504 int ret = 0; 505 uint64_t feature_enabled; 506 507 /* we need to re-init after suspend so return false */ 508 if (adev->in_suspend) 509 return false; 510 511 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 512 513 if (ret) 514 return false; 515 516 return !!(feature_enabled & SMC_DPM_FEATURE); 517 } 518 519 static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type, 520 uint32_t dpm_level, uint32_t *freq) 521 { 522 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 523 524 if (!clk_table || clk_type >= SMU_CLK_COUNT) 525 return -EINVAL; 526 527 switch (clk_type) { 528 case SMU_SOCCLK: 529 if (dpm_level >= clk_table->NumSocClkLevelsEnabled) 530 return -EINVAL; 531 *freq = clk_table->SocClocks[dpm_level]; 532 break; 533 case SMU_VCLK: 534 if (dpm_level >= clk_table->VcnClkLevelsEnabled) 535 return -EINVAL; 536 *freq = clk_table->VcnClocks[dpm_level].vclk; 537 break; 538 case SMU_DCLK: 539 if (dpm_level >= clk_table->VcnClkLevelsEnabled) 540 return -EINVAL; 541 *freq = clk_table->VcnClocks[dpm_level].dclk; 542 break; 543 case SMU_UCLK: 544 case SMU_MCLK: 545 if (dpm_level >= clk_table->NumDfPstatesEnabled) 546 return -EINVAL; 547 *freq = clk_table->DfPstateTable[dpm_level].memclk; 548 549 break; 550 case SMU_FCLK: 551 if (dpm_level >= clk_table->NumDfPstatesEnabled) 552 return -EINVAL; 553 *freq = clk_table->DfPstateTable[dpm_level].fclk; 554 break; 555 default: 556 return -EINVAL; 557 } 558 559 return 0; 560 } 561 562 static int vangogh_print_legacy_clk_levels(struct smu_context *smu, 563 enum smu_clk_type clk_type, char *buf) 564 { 565 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 566 SmuMetrics_legacy_t metrics; 567 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 568 int i, idx, size = 0, ret = 0; 569 uint32_t cur_value = 0, value = 0, count = 0; 570 bool cur_value_match_level = false; 571 572 memset(&metrics, 0, sizeof(metrics)); 573 574 ret = smu_cmn_get_metrics_table(smu, &metrics, false); 575 if (ret) 576 return ret; 577 578 smu_cmn_get_sysfs_buf(&buf, &size); 579 580 switch (clk_type) { 581 case SMU_OD_SCLK: 582 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 583 size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); 584 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 585 (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); 586 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 587 (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); 588 } 589 break; 590 case SMU_OD_CCLK: 591 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 592 size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); 593 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 594 (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); 595 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 596 (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); 597 } 598 break; 599 case SMU_OD_RANGE: 600 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 601 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 602 size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", 603 smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); 604 size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", 605 smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); 606 } 607 break; 608 case SMU_SOCCLK: 609 /* the level 3 ~ 6 of socclk use the same frequency for vangogh */ 610 count = clk_table->NumSocClkLevelsEnabled; 611 cur_value = metrics.SocclkFrequency; 612 break; 613 case SMU_VCLK: 614 count = clk_table->VcnClkLevelsEnabled; 615 cur_value = metrics.VclkFrequency; 616 break; 617 case SMU_DCLK: 618 count = clk_table->VcnClkLevelsEnabled; 619 cur_value = metrics.DclkFrequency; 620 break; 621 case SMU_MCLK: 622 count = clk_table->NumDfPstatesEnabled; 623 cur_value = metrics.MemclkFrequency; 624 break; 625 case SMU_FCLK: 626 count = clk_table->NumDfPstatesEnabled; 627 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value); 628 if (ret) 629 return ret; 630 break; 631 default: 632 break; 633 } 634 635 switch (clk_type) { 636 case SMU_SOCCLK: 637 case SMU_VCLK: 638 case SMU_DCLK: 639 case SMU_MCLK: 640 case SMU_FCLK: 641 for (i = 0; i < count; i++) { 642 idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; 643 ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); 644 if (ret) 645 return ret; 646 if (!value) 647 continue; 648 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value, 649 cur_value == value ? "*" : ""); 650 if (cur_value == value) 651 cur_value_match_level = true; 652 } 653 654 if (!cur_value_match_level) 655 size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value); 656 break; 657 default: 658 break; 659 } 660 661 return size; 662 } 663 664 static int vangogh_print_clk_levels(struct smu_context *smu, 665 enum smu_clk_type clk_type, char *buf) 666 { 667 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 668 SmuMetrics_t metrics; 669 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 670 int i, idx, size = 0, ret = 0; 671 uint32_t cur_value = 0, value = 0, count = 0; 672 bool cur_value_match_level = false; 673 uint32_t min, max; 674 675 memset(&metrics, 0, sizeof(metrics)); 676 677 ret = smu_cmn_get_metrics_table(smu, &metrics, false); 678 if (ret) 679 return ret; 680 681 smu_cmn_get_sysfs_buf(&buf, &size); 682 683 switch (clk_type) { 684 case SMU_OD_SCLK: 685 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 686 size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); 687 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 688 (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); 689 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 690 (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); 691 } 692 break; 693 case SMU_OD_CCLK: 694 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 695 size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); 696 size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 697 (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); 698 size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 699 (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); 700 } 701 break; 702 case SMU_OD_RANGE: 703 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 704 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 705 size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", 706 smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); 707 size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", 708 smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); 709 } 710 break; 711 case SMU_SOCCLK: 712 /* the level 3 ~ 6 of socclk use the same frequency for vangogh */ 713 count = clk_table->NumSocClkLevelsEnabled; 714 cur_value = metrics.Current.SocclkFrequency; 715 break; 716 case SMU_VCLK: 717 count = clk_table->VcnClkLevelsEnabled; 718 cur_value = metrics.Current.VclkFrequency; 719 break; 720 case SMU_DCLK: 721 count = clk_table->VcnClkLevelsEnabled; 722 cur_value = metrics.Current.DclkFrequency; 723 break; 724 case SMU_MCLK: 725 count = clk_table->NumDfPstatesEnabled; 726 cur_value = metrics.Current.MemclkFrequency; 727 break; 728 case SMU_FCLK: 729 count = clk_table->NumDfPstatesEnabled; 730 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value); 731 if (ret) 732 return ret; 733 break; 734 case SMU_GFXCLK: 735 case SMU_SCLK: 736 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value); 737 if (ret) { 738 return ret; 739 } 740 break; 741 default: 742 break; 743 } 744 745 switch (clk_type) { 746 case SMU_SOCCLK: 747 case SMU_VCLK: 748 case SMU_DCLK: 749 case SMU_MCLK: 750 case SMU_FCLK: 751 for (i = 0; i < count; i++) { 752 idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; 753 ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); 754 if (ret) 755 return ret; 756 if (!value) 757 continue; 758 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value, 759 cur_value == value ? "*" : ""); 760 if (cur_value == value) 761 cur_value_match_level = true; 762 } 763 764 if (!cur_value_match_level) 765 size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value); 766 break; 767 case SMU_GFXCLK: 768 case SMU_SCLK: 769 min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; 770 max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; 771 if (cur_value == max) 772 i = 2; 773 else if (cur_value == min) 774 i = 0; 775 else 776 i = 1; 777 size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, 778 i == 0 ? "*" : ""); 779 size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", 780 i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, 781 i == 1 ? "*" : ""); 782 size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, 783 i == 2 ? "*" : ""); 784 break; 785 default: 786 break; 787 } 788 789 return size; 790 } 791 792 static int vangogh_common_print_clk_levels(struct smu_context *smu, 793 enum smu_clk_type clk_type, char *buf) 794 { 795 int ret = 0; 796 797 if (smu->smc_fw_if_version < 0x3) 798 ret = vangogh_print_legacy_clk_levels(smu, clk_type, buf); 799 else 800 ret = vangogh_print_clk_levels(smu, clk_type, buf); 801 802 return ret; 803 } 804 805 static int vangogh_get_profiling_clk_mask(struct smu_context *smu, 806 enum amd_dpm_forced_level level, 807 uint32_t *vclk_mask, 808 uint32_t *dclk_mask, 809 uint32_t *mclk_mask, 810 uint32_t *fclk_mask, 811 uint32_t *soc_mask) 812 { 813 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 814 815 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { 816 if (mclk_mask) 817 *mclk_mask = clk_table->NumDfPstatesEnabled - 1; 818 819 if (fclk_mask) 820 *fclk_mask = clk_table->NumDfPstatesEnabled - 1; 821 822 if (soc_mask) 823 *soc_mask = 0; 824 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 825 if (mclk_mask) 826 *mclk_mask = 0; 827 828 if (fclk_mask) 829 *fclk_mask = 0; 830 831 if (soc_mask) 832 *soc_mask = 1; 833 834 if (vclk_mask) 835 *vclk_mask = 1; 836 837 if (dclk_mask) 838 *dclk_mask = 1; 839 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) { 840 if (mclk_mask) 841 *mclk_mask = 0; 842 843 if (fclk_mask) 844 *fclk_mask = 0; 845 846 if (soc_mask) 847 *soc_mask = 1; 848 849 if (vclk_mask) 850 *vclk_mask = 1; 851 852 if (dclk_mask) 853 *dclk_mask = 1; 854 } 855 856 return 0; 857 } 858 859 static bool vangogh_clk_dpm_is_enabled(struct smu_context *smu, 860 enum smu_clk_type clk_type) 861 { 862 enum smu_feature_mask feature_id = 0; 863 864 switch (clk_type) { 865 case SMU_MCLK: 866 case SMU_UCLK: 867 case SMU_FCLK: 868 feature_id = SMU_FEATURE_DPM_FCLK_BIT; 869 break; 870 case SMU_GFXCLK: 871 case SMU_SCLK: 872 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT; 873 break; 874 case SMU_SOCCLK: 875 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT; 876 break; 877 case SMU_VCLK: 878 case SMU_DCLK: 879 feature_id = SMU_FEATURE_VCN_DPM_BIT; 880 break; 881 default: 882 return true; 883 } 884 885 if (!smu_cmn_feature_is_enabled(smu, feature_id)) 886 return false; 887 888 return true; 889 } 890 891 static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu, 892 enum smu_clk_type clk_type, 893 uint32_t *min, 894 uint32_t *max) 895 { 896 int ret = 0; 897 uint32_t soc_mask; 898 uint32_t vclk_mask; 899 uint32_t dclk_mask; 900 uint32_t mclk_mask; 901 uint32_t fclk_mask; 902 uint32_t clock_limit; 903 904 if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) { 905 switch (clk_type) { 906 case SMU_MCLK: 907 case SMU_UCLK: 908 clock_limit = smu->smu_table.boot_values.uclk; 909 break; 910 case SMU_FCLK: 911 clock_limit = smu->smu_table.boot_values.fclk; 912 break; 913 case SMU_GFXCLK: 914 case SMU_SCLK: 915 clock_limit = smu->smu_table.boot_values.gfxclk; 916 break; 917 case SMU_SOCCLK: 918 clock_limit = smu->smu_table.boot_values.socclk; 919 break; 920 case SMU_VCLK: 921 clock_limit = smu->smu_table.boot_values.vclk; 922 break; 923 case SMU_DCLK: 924 clock_limit = smu->smu_table.boot_values.dclk; 925 break; 926 default: 927 clock_limit = 0; 928 break; 929 } 930 931 /* clock in Mhz unit */ 932 if (min) 933 *min = clock_limit / 100; 934 if (max) 935 *max = clock_limit / 100; 936 937 return 0; 938 } 939 if (max) { 940 ret = vangogh_get_profiling_clk_mask(smu, 941 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK, 942 &vclk_mask, 943 &dclk_mask, 944 &mclk_mask, 945 &fclk_mask, 946 &soc_mask); 947 if (ret) 948 goto failed; 949 950 switch (clk_type) { 951 case SMU_UCLK: 952 case SMU_MCLK: 953 ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, max); 954 if (ret) 955 goto failed; 956 break; 957 case SMU_SOCCLK: 958 ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, max); 959 if (ret) 960 goto failed; 961 break; 962 case SMU_FCLK: 963 ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, max); 964 if (ret) 965 goto failed; 966 break; 967 case SMU_VCLK: 968 ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, max); 969 if (ret) 970 goto failed; 971 break; 972 case SMU_DCLK: 973 ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, max); 974 if (ret) 975 goto failed; 976 break; 977 default: 978 ret = -EINVAL; 979 goto failed; 980 } 981 } 982 if (min) { 983 ret = vangogh_get_profiling_clk_mask(smu, 984 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK, 985 NULL, 986 NULL, 987 &mclk_mask, 988 &fclk_mask, 989 &soc_mask); 990 if (ret) 991 goto failed; 992 993 vclk_mask = dclk_mask = 0; 994 995 switch (clk_type) { 996 case SMU_UCLK: 997 case SMU_MCLK: 998 ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, min); 999 if (ret) 1000 goto failed; 1001 break; 1002 case SMU_SOCCLK: 1003 ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, min); 1004 if (ret) 1005 goto failed; 1006 break; 1007 case SMU_FCLK: 1008 ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, min); 1009 if (ret) 1010 goto failed; 1011 break; 1012 case SMU_VCLK: 1013 ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, min); 1014 if (ret) 1015 goto failed; 1016 break; 1017 case SMU_DCLK: 1018 ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, min); 1019 if (ret) 1020 goto failed; 1021 break; 1022 default: 1023 ret = -EINVAL; 1024 goto failed; 1025 } 1026 } 1027 failed: 1028 return ret; 1029 } 1030 1031 static int vangogh_get_power_profile_mode(struct smu_context *smu, 1032 char *buf) 1033 { 1034 uint32_t i, size = 0; 1035 int16_t workload_type = 0; 1036 1037 if (!buf) 1038 return -EINVAL; 1039 1040 for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) { 1041 /* 1042 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT 1043 * Not all profile modes are supported on vangogh. 1044 */ 1045 workload_type = smu_cmn_to_asic_specific_index(smu, 1046 CMN2ASIC_MAPPING_WORKLOAD, 1047 i); 1048 1049 if (workload_type < 0) 1050 continue; 1051 1052 size += sysfs_emit_at(buf, size, "%2d %14s%s\n", 1053 i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); 1054 } 1055 1056 return size; 1057 } 1058 1059 static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) 1060 { 1061 int workload_type, ret; 1062 uint32_t profile_mode = input[size]; 1063 1064 if (profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { 1065 dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); 1066 return -EINVAL; 1067 } 1068 1069 if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT || 1070 profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) 1071 return 0; 1072 1073 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 1074 workload_type = smu_cmn_to_asic_specific_index(smu, 1075 CMN2ASIC_MAPPING_WORKLOAD, 1076 profile_mode); 1077 if (workload_type < 0) { 1078 dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n", 1079 profile_mode); 1080 return -EINVAL; 1081 } 1082 1083 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, 1084 smu->workload_mask, 1085 NULL); 1086 if (ret) { 1087 dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", 1088 workload_type); 1089 return ret; 1090 } 1091 1092 smu_cmn_assign_power_profile(smu); 1093 1094 return 0; 1095 } 1096 1097 static int vangogh_set_soft_freq_limited_range(struct smu_context *smu, 1098 enum smu_clk_type clk_type, 1099 uint32_t min, 1100 uint32_t max, 1101 bool automatic) 1102 { 1103 int ret = 0; 1104 1105 if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) 1106 return 0; 1107 1108 switch (clk_type) { 1109 case SMU_GFXCLK: 1110 case SMU_SCLK: 1111 ret = smu_cmn_send_smc_msg_with_param(smu, 1112 SMU_MSG_SetHardMinGfxClk, 1113 min, NULL); 1114 if (ret) 1115 return ret; 1116 1117 ret = smu_cmn_send_smc_msg_with_param(smu, 1118 SMU_MSG_SetSoftMaxGfxClk, 1119 max, NULL); 1120 if (ret) 1121 return ret; 1122 break; 1123 case SMU_FCLK: 1124 ret = smu_cmn_send_smc_msg_with_param(smu, 1125 SMU_MSG_SetHardMinFclkByFreq, 1126 min, NULL); 1127 if (ret) 1128 return ret; 1129 1130 ret = smu_cmn_send_smc_msg_with_param(smu, 1131 SMU_MSG_SetSoftMaxFclkByFreq, 1132 max, NULL); 1133 if (ret) 1134 return ret; 1135 break; 1136 case SMU_SOCCLK: 1137 ret = smu_cmn_send_smc_msg_with_param(smu, 1138 SMU_MSG_SetHardMinSocclkByFreq, 1139 min, NULL); 1140 if (ret) 1141 return ret; 1142 1143 ret = smu_cmn_send_smc_msg_with_param(smu, 1144 SMU_MSG_SetSoftMaxSocclkByFreq, 1145 max, NULL); 1146 if (ret) 1147 return ret; 1148 break; 1149 case SMU_VCLK: 1150 ret = smu_cmn_send_smc_msg_with_param(smu, 1151 SMU_MSG_SetHardMinVcn, 1152 min << 16, NULL); 1153 if (ret) 1154 return ret; 1155 ret = smu_cmn_send_smc_msg_with_param(smu, 1156 SMU_MSG_SetSoftMaxVcn, 1157 max << 16, NULL); 1158 if (ret) 1159 return ret; 1160 break; 1161 case SMU_DCLK: 1162 ret = smu_cmn_send_smc_msg_with_param(smu, 1163 SMU_MSG_SetHardMinVcn, 1164 min, NULL); 1165 if (ret) 1166 return ret; 1167 ret = smu_cmn_send_smc_msg_with_param(smu, 1168 SMU_MSG_SetSoftMaxVcn, 1169 max, NULL); 1170 if (ret) 1171 return ret; 1172 break; 1173 default: 1174 return -EINVAL; 1175 } 1176 1177 return ret; 1178 } 1179 1180 static int vangogh_force_clk_levels(struct smu_context *smu, 1181 enum smu_clk_type clk_type, uint32_t mask) 1182 { 1183 uint32_t soft_min_level = 0, soft_max_level = 0; 1184 uint32_t min_freq = 0, max_freq = 0; 1185 int ret = 0 ; 1186 1187 soft_min_level = mask ? (ffs(mask) - 1) : 0; 1188 soft_max_level = mask ? (fls(mask) - 1) : 0; 1189 1190 switch (clk_type) { 1191 case SMU_SOCCLK: 1192 ret = vangogh_get_dpm_clk_limited(smu, clk_type, 1193 soft_min_level, &min_freq); 1194 if (ret) 1195 return ret; 1196 ret = vangogh_get_dpm_clk_limited(smu, clk_type, 1197 soft_max_level, &max_freq); 1198 if (ret) 1199 return ret; 1200 ret = smu_cmn_send_smc_msg_with_param(smu, 1201 SMU_MSG_SetSoftMaxSocclkByFreq, 1202 max_freq, NULL); 1203 if (ret) 1204 return ret; 1205 ret = smu_cmn_send_smc_msg_with_param(smu, 1206 SMU_MSG_SetHardMinSocclkByFreq, 1207 min_freq, NULL); 1208 if (ret) 1209 return ret; 1210 break; 1211 case SMU_FCLK: 1212 ret = vangogh_get_dpm_clk_limited(smu, 1213 clk_type, soft_min_level, &min_freq); 1214 if (ret) 1215 return ret; 1216 ret = vangogh_get_dpm_clk_limited(smu, 1217 clk_type, soft_max_level, &max_freq); 1218 if (ret) 1219 return ret; 1220 ret = smu_cmn_send_smc_msg_with_param(smu, 1221 SMU_MSG_SetSoftMaxFclkByFreq, 1222 max_freq, NULL); 1223 if (ret) 1224 return ret; 1225 ret = smu_cmn_send_smc_msg_with_param(smu, 1226 SMU_MSG_SetHardMinFclkByFreq, 1227 min_freq, NULL); 1228 if (ret) 1229 return ret; 1230 break; 1231 case SMU_VCLK: 1232 ret = vangogh_get_dpm_clk_limited(smu, 1233 clk_type, soft_min_level, &min_freq); 1234 if (ret) 1235 return ret; 1236 1237 ret = vangogh_get_dpm_clk_limited(smu, 1238 clk_type, soft_max_level, &max_freq); 1239 if (ret) 1240 return ret; 1241 1242 1243 ret = smu_cmn_send_smc_msg_with_param(smu, 1244 SMU_MSG_SetHardMinVcn, 1245 min_freq << 16, NULL); 1246 if (ret) 1247 return ret; 1248 1249 ret = smu_cmn_send_smc_msg_with_param(smu, 1250 SMU_MSG_SetSoftMaxVcn, 1251 max_freq << 16, NULL); 1252 if (ret) 1253 return ret; 1254 1255 break; 1256 case SMU_DCLK: 1257 ret = vangogh_get_dpm_clk_limited(smu, 1258 clk_type, soft_min_level, &min_freq); 1259 if (ret) 1260 return ret; 1261 1262 ret = vangogh_get_dpm_clk_limited(smu, 1263 clk_type, soft_max_level, &max_freq); 1264 if (ret) 1265 return ret; 1266 1267 ret = smu_cmn_send_smc_msg_with_param(smu, 1268 SMU_MSG_SetHardMinVcn, 1269 min_freq, NULL); 1270 if (ret) 1271 return ret; 1272 1273 ret = smu_cmn_send_smc_msg_with_param(smu, 1274 SMU_MSG_SetSoftMaxVcn, 1275 max_freq, NULL); 1276 if (ret) 1277 return ret; 1278 1279 break; 1280 default: 1281 break; 1282 } 1283 1284 return ret; 1285 } 1286 1287 static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest) 1288 { 1289 int ret = 0, i = 0; 1290 uint32_t min_freq, max_freq, force_freq; 1291 enum smu_clk_type clk_type; 1292 1293 enum smu_clk_type clks[] = { 1294 SMU_SOCCLK, 1295 SMU_VCLK, 1296 SMU_DCLK, 1297 SMU_FCLK, 1298 }; 1299 1300 for (i = 0; i < ARRAY_SIZE(clks); i++) { 1301 clk_type = clks[i]; 1302 ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq); 1303 if (ret) 1304 return ret; 1305 1306 force_freq = highest ? max_freq : min_freq; 1307 ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq, false); 1308 if (ret) 1309 return ret; 1310 } 1311 1312 return ret; 1313 } 1314 1315 static int vangogh_unforce_dpm_levels(struct smu_context *smu) 1316 { 1317 int ret = 0, i = 0; 1318 uint32_t min_freq, max_freq; 1319 enum smu_clk_type clk_type; 1320 1321 struct clk_feature_map { 1322 enum smu_clk_type clk_type; 1323 uint32_t feature; 1324 } clk_feature_map[] = { 1325 {SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT}, 1326 {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT}, 1327 {SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT}, 1328 {SMU_DCLK, SMU_FEATURE_VCN_DPM_BIT}, 1329 }; 1330 1331 for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) { 1332 1333 if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature)) 1334 continue; 1335 1336 clk_type = clk_feature_map[i].clk_type; 1337 1338 ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq); 1339 1340 if (ret) 1341 return ret; 1342 1343 ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); 1344 1345 if (ret) 1346 return ret; 1347 } 1348 1349 return ret; 1350 } 1351 1352 static int vangogh_set_peak_clock_by_device(struct smu_context *smu) 1353 { 1354 int ret = 0; 1355 uint32_t socclk_freq = 0, fclk_freq = 0; 1356 uint32_t vclk_freq = 0, dclk_freq = 0; 1357 1358 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_freq); 1359 if (ret) 1360 return ret; 1361 1362 ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq, false); 1363 if (ret) 1364 return ret; 1365 1366 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_freq); 1367 if (ret) 1368 return ret; 1369 1370 ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq, false); 1371 if (ret) 1372 return ret; 1373 1374 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_freq); 1375 if (ret) 1376 return ret; 1377 1378 ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq, false); 1379 if (ret) 1380 return ret; 1381 1382 ret = vangogh_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_freq); 1383 if (ret) 1384 return ret; 1385 1386 ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq, false); 1387 if (ret) 1388 return ret; 1389 1390 return ret; 1391 } 1392 1393 static int vangogh_set_performance_level(struct smu_context *smu, 1394 enum amd_dpm_forced_level level) 1395 { 1396 int ret = 0, i; 1397 uint32_t soc_mask, mclk_mask, fclk_mask; 1398 uint32_t vclk_mask = 0, dclk_mask = 0; 1399 1400 smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; 1401 smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; 1402 1403 switch (level) { 1404 case AMD_DPM_FORCED_LEVEL_HIGH: 1405 smu->gfx_actual_hard_min_freq = smu->gfx_default_soft_max_freq; 1406 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 1407 1408 1409 ret = vangogh_force_dpm_limit_value(smu, true); 1410 if (ret) 1411 return ret; 1412 break; 1413 case AMD_DPM_FORCED_LEVEL_LOW: 1414 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1415 smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq; 1416 1417 ret = vangogh_force_dpm_limit_value(smu, false); 1418 if (ret) 1419 return ret; 1420 break; 1421 case AMD_DPM_FORCED_LEVEL_AUTO: 1422 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1423 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 1424 1425 ret = vangogh_unforce_dpm_levels(smu); 1426 if (ret) 1427 return ret; 1428 break; 1429 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 1430 smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK; 1431 smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK; 1432 1433 ret = vangogh_get_profiling_clk_mask(smu, level, 1434 &vclk_mask, 1435 &dclk_mask, 1436 &mclk_mask, 1437 &fclk_mask, 1438 &soc_mask); 1439 if (ret) 1440 return ret; 1441 1442 vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); 1443 vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); 1444 vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask); 1445 vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask); 1446 break; 1447 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 1448 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1449 smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq; 1450 break; 1451 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 1452 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1453 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 1454 1455 ret = vangogh_get_profiling_clk_mask(smu, level, 1456 NULL, 1457 NULL, 1458 &mclk_mask, 1459 &fclk_mask, 1460 NULL); 1461 if (ret) 1462 return ret; 1463 1464 vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); 1465 break; 1466 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1467 smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK; 1468 smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK; 1469 1470 ret = vangogh_set_peak_clock_by_device(smu); 1471 if (ret) 1472 return ret; 1473 break; 1474 case AMD_DPM_FORCED_LEVEL_MANUAL: 1475 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 1476 default: 1477 return 0; 1478 } 1479 1480 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, 1481 smu->gfx_actual_hard_min_freq, NULL); 1482 if (ret) 1483 return ret; 1484 1485 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, 1486 smu->gfx_actual_soft_max_freq, NULL); 1487 if (ret) 1488 return ret; 1489 1490 if (smu->adev->pm.fw_version >= 0x43f1b00) { 1491 for (i = 0; i < smu->cpu_core_num; i++) { 1492 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk, 1493 ((i << 20) 1494 | smu->cpu_actual_soft_min_freq), 1495 NULL); 1496 if (ret) 1497 return ret; 1498 1499 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk, 1500 ((i << 20) 1501 | smu->cpu_actual_soft_max_freq), 1502 NULL); 1503 if (ret) 1504 return ret; 1505 } 1506 } 1507 1508 return ret; 1509 } 1510 1511 static int vangogh_read_sensor(struct smu_context *smu, 1512 enum amd_pp_sensors sensor, 1513 void *data, uint32_t *size) 1514 { 1515 int ret = 0; 1516 1517 if (!data || !size) 1518 return -EINVAL; 1519 1520 switch (sensor) { 1521 case AMDGPU_PP_SENSOR_GPU_LOAD: 1522 ret = vangogh_common_get_smu_metrics_data(smu, 1523 METRICS_AVERAGE_GFXACTIVITY, 1524 (uint32_t *)data); 1525 *size = 4; 1526 break; 1527 case AMDGPU_PP_SENSOR_VCN_LOAD: 1528 ret = vangogh_common_get_smu_metrics_data(smu, 1529 METRICS_AVERAGE_VCNACTIVITY, 1530 (uint32_t *)data); 1531 *size = 4; 1532 break; 1533 case AMDGPU_PP_SENSOR_GPU_AVG_POWER: 1534 ret = vangogh_common_get_smu_metrics_data(smu, 1535 METRICS_AVERAGE_SOCKETPOWER, 1536 (uint32_t *)data); 1537 *size = 4; 1538 break; 1539 case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: 1540 ret = vangogh_common_get_smu_metrics_data(smu, 1541 METRICS_CURR_SOCKETPOWER, 1542 (uint32_t *)data); 1543 *size = 4; 1544 break; 1545 case AMDGPU_PP_SENSOR_EDGE_TEMP: 1546 ret = vangogh_common_get_smu_metrics_data(smu, 1547 METRICS_TEMPERATURE_EDGE, 1548 (uint32_t *)data); 1549 *size = 4; 1550 break; 1551 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 1552 ret = vangogh_common_get_smu_metrics_data(smu, 1553 METRICS_TEMPERATURE_HOTSPOT, 1554 (uint32_t *)data); 1555 *size = 4; 1556 break; 1557 case AMDGPU_PP_SENSOR_GFX_MCLK: 1558 ret = vangogh_common_get_smu_metrics_data(smu, 1559 METRICS_CURR_UCLK, 1560 (uint32_t *)data); 1561 *(uint32_t *)data *= 100; 1562 *size = 4; 1563 break; 1564 case AMDGPU_PP_SENSOR_GFX_SCLK: 1565 ret = vangogh_common_get_smu_metrics_data(smu, 1566 METRICS_CURR_GFXCLK, 1567 (uint32_t *)data); 1568 *(uint32_t *)data *= 100; 1569 *size = 4; 1570 break; 1571 case AMDGPU_PP_SENSOR_VDDGFX: 1572 ret = vangogh_common_get_smu_metrics_data(smu, 1573 METRICS_VOLTAGE_VDDGFX, 1574 (uint32_t *)data); 1575 *size = 4; 1576 break; 1577 case AMDGPU_PP_SENSOR_VDDNB: 1578 ret = vangogh_common_get_smu_metrics_data(smu, 1579 METRICS_VOLTAGE_VDDSOC, 1580 (uint32_t *)data); 1581 *size = 4; 1582 break; 1583 case AMDGPU_PP_SENSOR_CPU_CLK: 1584 ret = vangogh_common_get_smu_metrics_data(smu, 1585 METRICS_AVERAGE_CPUCLK, 1586 (uint32_t *)data); 1587 *size = smu->cpu_core_num * sizeof(uint16_t); 1588 break; 1589 default: 1590 ret = -EOPNOTSUPP; 1591 break; 1592 } 1593 1594 return ret; 1595 } 1596 1597 static int vangogh_get_apu_thermal_limit(struct smu_context *smu, uint32_t *limit) 1598 { 1599 return smu_cmn_send_smc_msg_with_param(smu, 1600 SMU_MSG_GetThermalLimit, 1601 0, limit); 1602 } 1603 1604 static int vangogh_set_apu_thermal_limit(struct smu_context *smu, uint32_t limit) 1605 { 1606 return smu_cmn_send_smc_msg_with_param(smu, 1607 SMU_MSG_SetReducedThermalLimit, 1608 limit, NULL); 1609 } 1610 1611 1612 static int vangogh_set_watermarks_table(struct smu_context *smu, 1613 struct pp_smu_wm_range_sets *clock_ranges) 1614 { 1615 int i; 1616 int ret = 0; 1617 Watermarks_t *table = smu->smu_table.watermarks_table; 1618 1619 if (!table || !clock_ranges) 1620 return -EINVAL; 1621 1622 if (clock_ranges) { 1623 if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES || 1624 clock_ranges->num_writer_wm_sets > NUM_WM_RANGES) 1625 return -EINVAL; 1626 1627 for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) { 1628 table->WatermarkRow[WM_DCFCLK][i].MinClock = 1629 clock_ranges->reader_wm_sets[i].min_drain_clk_mhz; 1630 table->WatermarkRow[WM_DCFCLK][i].MaxClock = 1631 clock_ranges->reader_wm_sets[i].max_drain_clk_mhz; 1632 table->WatermarkRow[WM_DCFCLK][i].MinMclk = 1633 clock_ranges->reader_wm_sets[i].min_fill_clk_mhz; 1634 table->WatermarkRow[WM_DCFCLK][i].MaxMclk = 1635 clock_ranges->reader_wm_sets[i].max_fill_clk_mhz; 1636 1637 table->WatermarkRow[WM_DCFCLK][i].WmSetting = 1638 clock_ranges->reader_wm_sets[i].wm_inst; 1639 } 1640 1641 for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) { 1642 table->WatermarkRow[WM_SOCCLK][i].MinClock = 1643 clock_ranges->writer_wm_sets[i].min_fill_clk_mhz; 1644 table->WatermarkRow[WM_SOCCLK][i].MaxClock = 1645 clock_ranges->writer_wm_sets[i].max_fill_clk_mhz; 1646 table->WatermarkRow[WM_SOCCLK][i].MinMclk = 1647 clock_ranges->writer_wm_sets[i].min_drain_clk_mhz; 1648 table->WatermarkRow[WM_SOCCLK][i].MaxMclk = 1649 clock_ranges->writer_wm_sets[i].max_drain_clk_mhz; 1650 1651 table->WatermarkRow[WM_SOCCLK][i].WmSetting = 1652 clock_ranges->writer_wm_sets[i].wm_inst; 1653 } 1654 1655 smu->watermarks_bitmap |= WATERMARKS_EXIST; 1656 } 1657 1658 /* pass data to smu controller */ 1659 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && 1660 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { 1661 ret = smu_cmn_write_watermarks_table(smu); 1662 if (ret) { 1663 dev_err(smu->adev->dev, "Failed to update WMTABLE!"); 1664 return ret; 1665 } 1666 smu->watermarks_bitmap |= WATERMARKS_LOADED; 1667 } 1668 1669 return 0; 1670 } 1671 1672 static ssize_t vangogh_get_legacy_gpu_metrics_v2_3(struct smu_context *smu, 1673 void **table) 1674 { 1675 struct smu_table_context *smu_table = &smu->smu_table; 1676 struct gpu_metrics_v2_3 *gpu_metrics = 1677 (struct gpu_metrics_v2_3 *)smu_table->gpu_metrics_table; 1678 SmuMetrics_legacy_t metrics; 1679 int ret = 0; 1680 1681 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 1682 if (ret) 1683 return ret; 1684 1685 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 3); 1686 1687 gpu_metrics->temperature_gfx = metrics.GfxTemperature; 1688 gpu_metrics->temperature_soc = metrics.SocTemperature; 1689 memcpy(&gpu_metrics->temperature_core[0], 1690 &metrics.CoreTemperature[0], 1691 sizeof(uint16_t) * 4); 1692 gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0]; 1693 1694 gpu_metrics->average_gfx_activity = metrics.GfxActivity; 1695 gpu_metrics->average_mm_activity = metrics.UvdActivity; 1696 1697 gpu_metrics->average_socket_power = metrics.CurrentSocketPower; 1698 gpu_metrics->average_cpu_power = metrics.Power[0]; 1699 gpu_metrics->average_soc_power = metrics.Power[1]; 1700 gpu_metrics->average_gfx_power = metrics.Power[2]; 1701 memcpy(&gpu_metrics->average_core_power[0], 1702 &metrics.CorePower[0], 1703 sizeof(uint16_t) * 4); 1704 1705 gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency; 1706 gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency; 1707 gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency; 1708 gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency; 1709 gpu_metrics->average_vclk_frequency = metrics.VclkFrequency; 1710 gpu_metrics->average_dclk_frequency = metrics.DclkFrequency; 1711 1712 memcpy(&gpu_metrics->current_coreclk[0], 1713 &metrics.CoreFrequency[0], 1714 sizeof(uint16_t) * 4); 1715 gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0]; 1716 1717 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 1718 gpu_metrics->indep_throttle_status = 1719 smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus, 1720 vangogh_throttler_map); 1721 1722 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1723 1724 *table = (void *)gpu_metrics; 1725 1726 return sizeof(struct gpu_metrics_v2_3); 1727 } 1728 1729 static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu, 1730 void **table) 1731 { 1732 struct smu_table_context *smu_table = &smu->smu_table; 1733 struct gpu_metrics_v2_2 *gpu_metrics = 1734 (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table; 1735 SmuMetrics_legacy_t metrics; 1736 int ret = 0; 1737 1738 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 1739 if (ret) 1740 return ret; 1741 1742 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2); 1743 1744 gpu_metrics->temperature_gfx = metrics.GfxTemperature; 1745 gpu_metrics->temperature_soc = metrics.SocTemperature; 1746 memcpy(&gpu_metrics->temperature_core[0], 1747 &metrics.CoreTemperature[0], 1748 sizeof(uint16_t) * 4); 1749 gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0]; 1750 1751 gpu_metrics->average_gfx_activity = metrics.GfxActivity; 1752 gpu_metrics->average_mm_activity = metrics.UvdActivity; 1753 1754 gpu_metrics->average_socket_power = metrics.CurrentSocketPower; 1755 gpu_metrics->average_cpu_power = metrics.Power[0]; 1756 gpu_metrics->average_soc_power = metrics.Power[1]; 1757 gpu_metrics->average_gfx_power = metrics.Power[2]; 1758 memcpy(&gpu_metrics->average_core_power[0], 1759 &metrics.CorePower[0], 1760 sizeof(uint16_t) * 4); 1761 1762 gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency; 1763 gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency; 1764 gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency; 1765 gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency; 1766 gpu_metrics->average_vclk_frequency = metrics.VclkFrequency; 1767 gpu_metrics->average_dclk_frequency = metrics.DclkFrequency; 1768 1769 memcpy(&gpu_metrics->current_coreclk[0], 1770 &metrics.CoreFrequency[0], 1771 sizeof(uint16_t) * 4); 1772 gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0]; 1773 1774 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 1775 gpu_metrics->indep_throttle_status = 1776 smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus, 1777 vangogh_throttler_map); 1778 1779 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1780 1781 *table = (void *)gpu_metrics; 1782 1783 return sizeof(struct gpu_metrics_v2_2); 1784 } 1785 1786 static ssize_t vangogh_get_gpu_metrics_v2_3(struct smu_context *smu, 1787 void **table) 1788 { 1789 struct smu_table_context *smu_table = &smu->smu_table; 1790 struct gpu_metrics_v2_3 *gpu_metrics = 1791 (struct gpu_metrics_v2_3 *)smu_table->gpu_metrics_table; 1792 SmuMetrics_t metrics; 1793 int ret = 0; 1794 1795 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 1796 if (ret) 1797 return ret; 1798 1799 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 3); 1800 1801 gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature; 1802 gpu_metrics->temperature_soc = metrics.Current.SocTemperature; 1803 memcpy(&gpu_metrics->temperature_core[0], 1804 &metrics.Current.CoreTemperature[0], 1805 sizeof(uint16_t) * 4); 1806 gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0]; 1807 1808 gpu_metrics->average_temperature_gfx = metrics.Average.GfxTemperature; 1809 gpu_metrics->average_temperature_soc = metrics.Average.SocTemperature; 1810 memcpy(&gpu_metrics->average_temperature_core[0], 1811 &metrics.Average.CoreTemperature[0], 1812 sizeof(uint16_t) * 4); 1813 gpu_metrics->average_temperature_l3[0] = metrics.Average.L3Temperature[0]; 1814 1815 gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity; 1816 gpu_metrics->average_mm_activity = metrics.Current.UvdActivity; 1817 1818 gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower; 1819 gpu_metrics->average_cpu_power = metrics.Current.Power[0]; 1820 gpu_metrics->average_soc_power = metrics.Current.Power[1]; 1821 gpu_metrics->average_gfx_power = metrics.Current.Power[2]; 1822 memcpy(&gpu_metrics->average_core_power[0], 1823 &metrics.Average.CorePower[0], 1824 sizeof(uint16_t) * 4); 1825 1826 gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency; 1827 gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency; 1828 gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency; 1829 gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency; 1830 gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency; 1831 gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency; 1832 1833 gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency; 1834 gpu_metrics->current_socclk = metrics.Current.SocclkFrequency; 1835 gpu_metrics->current_uclk = metrics.Current.MemclkFrequency; 1836 gpu_metrics->current_fclk = metrics.Current.MemclkFrequency; 1837 gpu_metrics->current_vclk = metrics.Current.VclkFrequency; 1838 gpu_metrics->current_dclk = metrics.Current.DclkFrequency; 1839 1840 memcpy(&gpu_metrics->current_coreclk[0], 1841 &metrics.Current.CoreFrequency[0], 1842 sizeof(uint16_t) * 4); 1843 gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0]; 1844 1845 gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus; 1846 gpu_metrics->indep_throttle_status = 1847 smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus, 1848 vangogh_throttler_map); 1849 1850 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1851 1852 *table = (void *)gpu_metrics; 1853 1854 return sizeof(struct gpu_metrics_v2_3); 1855 } 1856 1857 static ssize_t vangogh_get_gpu_metrics_v2_4(struct smu_context *smu, 1858 void **table) 1859 { 1860 SmuMetrics_t metrics; 1861 struct smu_table_context *smu_table = &smu->smu_table; 1862 struct gpu_metrics_v2_4 *gpu_metrics = 1863 (struct gpu_metrics_v2_4 *)smu_table->gpu_metrics_table; 1864 int ret = 0; 1865 1866 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 1867 if (ret) 1868 return ret; 1869 1870 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 4); 1871 1872 gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature; 1873 gpu_metrics->temperature_soc = metrics.Current.SocTemperature; 1874 memcpy(&gpu_metrics->temperature_core[0], 1875 &metrics.Current.CoreTemperature[0], 1876 sizeof(uint16_t) * 4); 1877 gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0]; 1878 1879 gpu_metrics->average_temperature_gfx = metrics.Average.GfxTemperature; 1880 gpu_metrics->average_temperature_soc = metrics.Average.SocTemperature; 1881 memcpy(&gpu_metrics->average_temperature_core[0], 1882 &metrics.Average.CoreTemperature[0], 1883 sizeof(uint16_t) * 4); 1884 gpu_metrics->average_temperature_l3[0] = metrics.Average.L3Temperature[0]; 1885 1886 gpu_metrics->average_gfx_activity = metrics.Average.GfxActivity; 1887 gpu_metrics->average_mm_activity = metrics.Average.UvdActivity; 1888 1889 gpu_metrics->average_socket_power = metrics.Average.CurrentSocketPower; 1890 gpu_metrics->average_cpu_power = metrics.Average.Power[0]; 1891 gpu_metrics->average_soc_power = metrics.Average.Power[1]; 1892 gpu_metrics->average_gfx_power = metrics.Average.Power[2]; 1893 1894 gpu_metrics->average_cpu_voltage = metrics.Average.Voltage[0]; 1895 gpu_metrics->average_soc_voltage = metrics.Average.Voltage[1]; 1896 gpu_metrics->average_gfx_voltage = metrics.Average.Voltage[2]; 1897 1898 gpu_metrics->average_cpu_current = metrics.Average.Current[0]; 1899 gpu_metrics->average_soc_current = metrics.Average.Current[1]; 1900 gpu_metrics->average_gfx_current = metrics.Average.Current[2]; 1901 1902 memcpy(&gpu_metrics->average_core_power[0], 1903 &metrics.Average.CorePower[0], 1904 sizeof(uint16_t) * 4); 1905 1906 gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency; 1907 gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency; 1908 gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency; 1909 gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency; 1910 gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency; 1911 gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency; 1912 1913 gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency; 1914 gpu_metrics->current_socclk = metrics.Current.SocclkFrequency; 1915 gpu_metrics->current_uclk = metrics.Current.MemclkFrequency; 1916 gpu_metrics->current_fclk = metrics.Current.MemclkFrequency; 1917 gpu_metrics->current_vclk = metrics.Current.VclkFrequency; 1918 gpu_metrics->current_dclk = metrics.Current.DclkFrequency; 1919 1920 memcpy(&gpu_metrics->current_coreclk[0], 1921 &metrics.Current.CoreFrequency[0], 1922 sizeof(uint16_t) * 4); 1923 gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0]; 1924 1925 gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus; 1926 gpu_metrics->indep_throttle_status = 1927 smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus, 1928 vangogh_throttler_map); 1929 1930 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1931 1932 *table = (void *)gpu_metrics; 1933 1934 return sizeof(struct gpu_metrics_v2_4); 1935 } 1936 1937 static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, 1938 void **table) 1939 { 1940 struct smu_table_context *smu_table = &smu->smu_table; 1941 struct gpu_metrics_v2_2 *gpu_metrics = 1942 (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table; 1943 SmuMetrics_t metrics; 1944 int ret = 0; 1945 1946 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 1947 if (ret) 1948 return ret; 1949 1950 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2); 1951 1952 gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature; 1953 gpu_metrics->temperature_soc = metrics.Current.SocTemperature; 1954 memcpy(&gpu_metrics->temperature_core[0], 1955 &metrics.Current.CoreTemperature[0], 1956 sizeof(uint16_t) * 4); 1957 gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0]; 1958 1959 gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity; 1960 gpu_metrics->average_mm_activity = metrics.Current.UvdActivity; 1961 1962 gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower; 1963 gpu_metrics->average_cpu_power = metrics.Current.Power[0]; 1964 gpu_metrics->average_soc_power = metrics.Current.Power[1]; 1965 gpu_metrics->average_gfx_power = metrics.Current.Power[2]; 1966 memcpy(&gpu_metrics->average_core_power[0], 1967 &metrics.Average.CorePower[0], 1968 sizeof(uint16_t) * 4); 1969 1970 gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency; 1971 gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency; 1972 gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency; 1973 gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency; 1974 gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency; 1975 gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency; 1976 1977 gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency; 1978 gpu_metrics->current_socclk = metrics.Current.SocclkFrequency; 1979 gpu_metrics->current_uclk = metrics.Current.MemclkFrequency; 1980 gpu_metrics->current_fclk = metrics.Current.MemclkFrequency; 1981 gpu_metrics->current_vclk = metrics.Current.VclkFrequency; 1982 gpu_metrics->current_dclk = metrics.Current.DclkFrequency; 1983 1984 memcpy(&gpu_metrics->current_coreclk[0], 1985 &metrics.Current.CoreFrequency[0], 1986 sizeof(uint16_t) * 4); 1987 gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0]; 1988 1989 gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus; 1990 gpu_metrics->indep_throttle_status = 1991 smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus, 1992 vangogh_throttler_map); 1993 1994 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1995 1996 *table = (void *)gpu_metrics; 1997 1998 return sizeof(struct gpu_metrics_v2_2); 1999 } 2000 2001 static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu, 2002 void **table) 2003 { 2004 uint32_t smu_program; 2005 uint32_t fw_version; 2006 int ret = 0; 2007 2008 smu_program = (smu->smc_fw_version >> 24) & 0xff; 2009 fw_version = smu->smc_fw_version & 0xffffff; 2010 if (smu_program == 6) { 2011 if (fw_version >= 0x3F0800) 2012 ret = vangogh_get_gpu_metrics_v2_4(smu, table); 2013 else 2014 ret = vangogh_get_gpu_metrics_v2_3(smu, table); 2015 2016 } else { 2017 if (smu->smc_fw_version >= 0x043F3E00) { 2018 if (smu->smc_fw_if_version < 0x3) 2019 ret = vangogh_get_legacy_gpu_metrics_v2_3(smu, table); 2020 else 2021 ret = vangogh_get_gpu_metrics_v2_3(smu, table); 2022 } else { 2023 if (smu->smc_fw_if_version < 0x3) 2024 ret = vangogh_get_legacy_gpu_metrics(smu, table); 2025 else 2026 ret = vangogh_get_gpu_metrics(smu, table); 2027 } 2028 } 2029 2030 return ret; 2031 } 2032 2033 static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, 2034 long input[], uint32_t size) 2035 { 2036 int ret = 0; 2037 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 2038 2039 if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) { 2040 dev_warn(smu->adev->dev, 2041 "pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n"); 2042 return -EINVAL; 2043 } 2044 2045 switch (type) { 2046 case PP_OD_EDIT_CCLK_VDDC_TABLE: 2047 if (size != 3) { 2048 dev_err(smu->adev->dev, "Input parameter number not correct (should be 4 for processor)\n"); 2049 return -EINVAL; 2050 } 2051 if (input[0] >= smu->cpu_core_num) { 2052 dev_err(smu->adev->dev, "core index is overflow, should be less than %d\n", 2053 smu->cpu_core_num); 2054 } 2055 smu->cpu_core_id_select = input[0]; 2056 if (input[1] == 0) { 2057 if (input[2] < smu->cpu_default_soft_min_freq) { 2058 dev_warn(smu->adev->dev, "Fine grain setting minimum cclk (%ld) MHz is less than the minimum allowed (%d) MHz\n", 2059 input[2], smu->cpu_default_soft_min_freq); 2060 return -EINVAL; 2061 } 2062 smu->cpu_actual_soft_min_freq = input[2]; 2063 } else if (input[1] == 1) { 2064 if (input[2] > smu->cpu_default_soft_max_freq) { 2065 dev_warn(smu->adev->dev, "Fine grain setting maximum cclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n", 2066 input[2], smu->cpu_default_soft_max_freq); 2067 return -EINVAL; 2068 } 2069 smu->cpu_actual_soft_max_freq = input[2]; 2070 } else { 2071 return -EINVAL; 2072 } 2073 break; 2074 case PP_OD_EDIT_SCLK_VDDC_TABLE: 2075 if (size != 2) { 2076 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 2077 return -EINVAL; 2078 } 2079 2080 if (input[0] == 0) { 2081 if (input[1] < smu->gfx_default_hard_min_freq) { 2082 dev_warn(smu->adev->dev, 2083 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n", 2084 input[1], smu->gfx_default_hard_min_freq); 2085 return -EINVAL; 2086 } 2087 smu->gfx_actual_hard_min_freq = input[1]; 2088 } else if (input[0] == 1) { 2089 if (input[1] > smu->gfx_default_soft_max_freq) { 2090 dev_warn(smu->adev->dev, 2091 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n", 2092 input[1], smu->gfx_default_soft_max_freq); 2093 return -EINVAL; 2094 } 2095 smu->gfx_actual_soft_max_freq = input[1]; 2096 } else { 2097 return -EINVAL; 2098 } 2099 break; 2100 case PP_OD_RESTORE_DEFAULT_TABLE: 2101 if (size != 0) { 2102 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 2103 return -EINVAL; 2104 } else { 2105 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 2106 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 2107 smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; 2108 smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; 2109 } 2110 break; 2111 case PP_OD_COMMIT_DPM_TABLE: 2112 if (size != 0) { 2113 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 2114 return -EINVAL; 2115 } else { 2116 if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) { 2117 dev_err(smu->adev->dev, 2118 "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n", 2119 smu->gfx_actual_hard_min_freq, 2120 smu->gfx_actual_soft_max_freq); 2121 return -EINVAL; 2122 } 2123 2124 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, 2125 smu->gfx_actual_hard_min_freq, NULL); 2126 if (ret) { 2127 dev_err(smu->adev->dev, "Set hard min sclk failed!"); 2128 return ret; 2129 } 2130 2131 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, 2132 smu->gfx_actual_soft_max_freq, NULL); 2133 if (ret) { 2134 dev_err(smu->adev->dev, "Set soft max sclk failed!"); 2135 return ret; 2136 } 2137 2138 if (smu->adev->pm.fw_version < 0x43f1b00) { 2139 dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n"); 2140 break; 2141 } 2142 2143 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk, 2144 ((smu->cpu_core_id_select << 20) 2145 | smu->cpu_actual_soft_min_freq), 2146 NULL); 2147 if (ret) { 2148 dev_err(smu->adev->dev, "Set hard min cclk failed!"); 2149 return ret; 2150 } 2151 2152 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk, 2153 ((smu->cpu_core_id_select << 20) 2154 | smu->cpu_actual_soft_max_freq), 2155 NULL); 2156 if (ret) { 2157 dev_err(smu->adev->dev, "Set soft max cclk failed!"); 2158 return ret; 2159 } 2160 } 2161 break; 2162 default: 2163 return -ENOSYS; 2164 } 2165 2166 return ret; 2167 } 2168 2169 static int vangogh_set_default_dpm_tables(struct smu_context *smu) 2170 { 2171 struct smu_table_context *smu_table = &smu->smu_table; 2172 2173 return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); 2174 } 2175 2176 static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) 2177 { 2178 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 2179 2180 smu->gfx_default_hard_min_freq = clk_table->MinGfxClk; 2181 smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk; 2182 smu->gfx_actual_hard_min_freq = 0; 2183 smu->gfx_actual_soft_max_freq = 0; 2184 2185 smu->cpu_default_soft_min_freq = 1400; 2186 smu->cpu_default_soft_max_freq = 3500; 2187 smu->cpu_actual_soft_min_freq = 0; 2188 smu->cpu_actual_soft_max_freq = 0; 2189 2190 return 0; 2191 } 2192 2193 static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table) 2194 { 2195 DpmClocks_t *table = smu->smu_table.clocks_table; 2196 int i; 2197 2198 if (!clock_table || !table) 2199 return -EINVAL; 2200 2201 for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) { 2202 clock_table->SocClocks[i].Freq = table->SocClocks[i]; 2203 clock_table->SocClocks[i].Vol = table->SocVoltage[i]; 2204 } 2205 2206 for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) { 2207 clock_table->FClocks[i].Freq = table->DfPstateTable[i].fclk; 2208 clock_table->FClocks[i].Vol = table->DfPstateTable[i].voltage; 2209 } 2210 2211 for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) { 2212 clock_table->MemClocks[i].Freq = table->DfPstateTable[i].memclk; 2213 clock_table->MemClocks[i].Vol = table->DfPstateTable[i].voltage; 2214 } 2215 2216 return 0; 2217 } 2218 2219 static int vangogh_notify_rlc_state(struct smu_context *smu, bool en) 2220 { 2221 struct amdgpu_device *adev = smu->adev; 2222 int ret = 0; 2223 2224 if (adev->pm.fw_version >= 0x43f1700 && !en) 2225 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify, 2226 RLC_STATUS_OFF, NULL); 2227 2228 return ret; 2229 } 2230 2231 static int vangogh_post_smu_init(struct smu_context *smu) 2232 { 2233 struct amdgpu_device *adev = smu->adev; 2234 uint32_t tmp; 2235 int ret = 0; 2236 uint8_t aon_bits = 0; 2237 /* Two CUs in one WGP */ 2238 uint32_t req_active_wgps = adev->gfx.cu_info.number/2; 2239 uint32_t total_cu = adev->gfx.config.max_cu_per_sh * 2240 adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines; 2241 2242 /* allow message will be sent after enable message on Vangogh*/ 2243 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) && 2244 (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { 2245 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL); 2246 if (ret) { 2247 dev_err(adev->dev, "Failed to Enable GfxOff!\n"); 2248 return ret; 2249 } 2250 } else { 2251 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 2252 dev_info(adev->dev, "If GFX DPM or power gate disabled, disable GFXOFF\n"); 2253 } 2254 2255 /* if all CUs are active, no need to power off any WGPs */ 2256 if (total_cu == adev->gfx.cu_info.number) 2257 return 0; 2258 2259 /* 2260 * Calculate the total bits number of always on WGPs for all SA/SEs in 2261 * RLC_PG_ALWAYS_ON_WGP_MASK. 2262 */ 2263 tmp = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_ALWAYS_ON_WGP_MASK)); 2264 tmp &= RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK_MASK; 2265 2266 aon_bits = hweight32(tmp) * adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines; 2267 2268 /* Do not request any WGPs less than set in the AON_WGP_MASK */ 2269 if (aon_bits > req_active_wgps) { 2270 dev_info(adev->dev, "Number of always on WGPs greater than active WGPs: WGP power save not requested.\n"); 2271 return 0; 2272 } else { 2273 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestActiveWgp, req_active_wgps, NULL); 2274 } 2275 } 2276 2277 static int vangogh_mode_reset(struct smu_context *smu, int type) 2278 { 2279 int ret = 0, index = 0; 2280 2281 index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, 2282 SMU_MSG_GfxDeviceDriverReset); 2283 if (index < 0) 2284 return index == -EACCES ? 0 : index; 2285 2286 mutex_lock(&smu->message_lock); 2287 2288 ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type); 2289 2290 mutex_unlock(&smu->message_lock); 2291 2292 mdelay(10); 2293 2294 return ret; 2295 } 2296 2297 static int vangogh_mode2_reset(struct smu_context *smu) 2298 { 2299 return vangogh_mode_reset(smu, SMU_RESET_MODE_2); 2300 } 2301 2302 /** 2303 * vangogh_get_gfxoff_status - Get gfxoff status 2304 * 2305 * @smu: amdgpu_device pointer 2306 * 2307 * Get current gfxoff status 2308 * 2309 * Return: 2310 * * 0 - GFXOFF (default if enabled). 2311 * * 1 - Transition out of GFX State. 2312 * * 2 - Not in GFXOFF. 2313 * * 3 - Transition into GFXOFF. 2314 */ 2315 static u32 vangogh_get_gfxoff_status(struct smu_context *smu) 2316 { 2317 struct amdgpu_device *adev = smu->adev; 2318 u32 reg, gfxoff_status; 2319 2320 reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL); 2321 gfxoff_status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK) 2322 >> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT; 2323 2324 return gfxoff_status; 2325 } 2326 2327 static int vangogh_get_power_limit(struct smu_context *smu, 2328 uint32_t *current_power_limit, 2329 uint32_t *default_power_limit, 2330 uint32_t *max_power_limit, 2331 uint32_t *min_power_limit) 2332 { 2333 struct smu_11_5_power_context *power_context = 2334 smu->smu_power.power_context; 2335 uint32_t ppt_limit; 2336 int ret = 0; 2337 2338 if (smu->adev->pm.fw_version < 0x43f1e00) 2339 return ret; 2340 2341 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSlowPPTLimit, &ppt_limit); 2342 if (ret) { 2343 dev_err(smu->adev->dev, "Get slow PPT limit failed!\n"); 2344 return ret; 2345 } 2346 /* convert from milliwatt to watt */ 2347 if (current_power_limit) 2348 *current_power_limit = ppt_limit / 1000; 2349 if (default_power_limit) 2350 *default_power_limit = ppt_limit / 1000; 2351 if (max_power_limit) 2352 *max_power_limit = 29; 2353 if (min_power_limit) 2354 *min_power_limit = 0; 2355 2356 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit); 2357 if (ret) { 2358 dev_err(smu->adev->dev, "Get fast PPT limit failed!\n"); 2359 return ret; 2360 } 2361 /* convert from milliwatt to watt */ 2362 power_context->current_fast_ppt_limit = 2363 power_context->default_fast_ppt_limit = ppt_limit / 1000; 2364 power_context->max_fast_ppt_limit = 30; 2365 2366 return ret; 2367 } 2368 2369 static int vangogh_get_ppt_limit(struct smu_context *smu, 2370 uint32_t *ppt_limit, 2371 enum smu_ppt_limit_type type, 2372 enum smu_ppt_limit_level level) 2373 { 2374 struct smu_11_5_power_context *power_context = 2375 smu->smu_power.power_context; 2376 2377 if (!power_context) 2378 return -EOPNOTSUPP; 2379 2380 if (type == SMU_FAST_PPT_LIMIT) { 2381 switch (level) { 2382 case SMU_PPT_LIMIT_MAX: 2383 *ppt_limit = power_context->max_fast_ppt_limit; 2384 break; 2385 case SMU_PPT_LIMIT_CURRENT: 2386 *ppt_limit = power_context->current_fast_ppt_limit; 2387 break; 2388 case SMU_PPT_LIMIT_DEFAULT: 2389 *ppt_limit = power_context->default_fast_ppt_limit; 2390 break; 2391 default: 2392 break; 2393 } 2394 } 2395 2396 return 0; 2397 } 2398 2399 static int vangogh_set_power_limit(struct smu_context *smu, 2400 enum smu_ppt_limit_type limit_type, 2401 uint32_t ppt_limit) 2402 { 2403 struct smu_11_5_power_context *power_context = 2404 smu->smu_power.power_context; 2405 int ret = 0; 2406 2407 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { 2408 dev_err(smu->adev->dev, "Setting new power limit is not supported!\n"); 2409 return -EOPNOTSUPP; 2410 } 2411 2412 switch (limit_type) { 2413 case SMU_DEFAULT_PPT_LIMIT: 2414 ret = smu_cmn_send_smc_msg_with_param(smu, 2415 SMU_MSG_SetSlowPPTLimit, 2416 ppt_limit * 1000, /* convert from watt to milliwatt */ 2417 NULL); 2418 if (ret) 2419 return ret; 2420 2421 smu->current_power_limit = ppt_limit; 2422 break; 2423 case SMU_FAST_PPT_LIMIT: 2424 ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24); 2425 if (ppt_limit > power_context->max_fast_ppt_limit) { 2426 dev_err(smu->adev->dev, 2427 "New power limit (%d) is over the max allowed %d\n", 2428 ppt_limit, power_context->max_fast_ppt_limit); 2429 return ret; 2430 } 2431 2432 ret = smu_cmn_send_smc_msg_with_param(smu, 2433 SMU_MSG_SetFastPPTLimit, 2434 ppt_limit * 1000, /* convert from watt to milliwatt */ 2435 NULL); 2436 if (ret) 2437 return ret; 2438 2439 power_context->current_fast_ppt_limit = ppt_limit; 2440 break; 2441 default: 2442 return -EINVAL; 2443 } 2444 2445 return ret; 2446 } 2447 2448 /** 2449 * vangogh_set_gfxoff_residency 2450 * 2451 * @smu: amdgpu_device pointer 2452 * @start: start/stop residency log 2453 * 2454 * This function will be used to log gfxoff residency 2455 * 2456 * 2457 * Returns standard response codes. 2458 */ 2459 static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start) 2460 { 2461 int ret = 0; 2462 u32 residency; 2463 struct amdgpu_device *adev = smu->adev; 2464 2465 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) 2466 return 0; 2467 2468 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LogGfxOffResidency, 2469 start, &residency); 2470 if (ret) 2471 return ret; 2472 2473 if (!start) 2474 adev->gfx.gfx_off_residency = residency; 2475 2476 return ret; 2477 } 2478 2479 /** 2480 * vangogh_get_gfxoff_residency 2481 * 2482 * @smu: amdgpu_device pointer 2483 * @residency: placeholder for return value 2484 * 2485 * This function will be used to get gfxoff residency. 2486 * 2487 * Returns standard response codes. 2488 */ 2489 static u32 vangogh_get_gfxoff_residency(struct smu_context *smu, uint32_t *residency) 2490 { 2491 struct amdgpu_device *adev = smu->adev; 2492 2493 *residency = adev->gfx.gfx_off_residency; 2494 2495 return 0; 2496 } 2497 2498 /** 2499 * vangogh_get_gfxoff_entrycount - get gfxoff entry count 2500 * 2501 * @smu: amdgpu_device pointer 2502 * @entrycount: placeholder for return value 2503 * 2504 * This function will be used to get gfxoff entry count 2505 * 2506 * Returns standard response codes. 2507 */ 2508 static u32 vangogh_get_gfxoff_entrycount(struct smu_context *smu, uint64_t *entrycount) 2509 { 2510 int ret = 0, value = 0; 2511 struct amdgpu_device *adev = smu->adev; 2512 2513 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) 2514 return 0; 2515 2516 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetGfxOffEntryCount, &value); 2517 *entrycount = value + adev->gfx.gfx_off_entrycount; 2518 2519 return ret; 2520 } 2521 2522 static const struct pptable_funcs vangogh_ppt_funcs = { 2523 2524 .check_fw_status = smu_v11_0_check_fw_status, 2525 .check_fw_version = smu_v11_0_check_fw_version, 2526 .init_smc_tables = vangogh_init_smc_tables, 2527 .fini_smc_tables = smu_v11_0_fini_smc_tables, 2528 .init_power = smu_v11_0_init_power, 2529 .fini_power = smu_v11_0_fini_power, 2530 .register_irq_handler = smu_v11_0_register_irq_handler, 2531 .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, 2532 .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, 2533 .send_smc_msg = smu_cmn_send_smc_msg, 2534 .dpm_set_vcn_enable = vangogh_dpm_set_vcn_enable, 2535 .dpm_set_jpeg_enable = vangogh_dpm_set_jpeg_enable, 2536 .is_dpm_running = vangogh_is_dpm_running, 2537 .read_sensor = vangogh_read_sensor, 2538 .get_apu_thermal_limit = vangogh_get_apu_thermal_limit, 2539 .set_apu_thermal_limit = vangogh_set_apu_thermal_limit, 2540 .get_enabled_mask = smu_cmn_get_enabled_mask, 2541 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 2542 .set_watermarks_table = vangogh_set_watermarks_table, 2543 .set_driver_table_location = smu_v11_0_set_driver_table_location, 2544 .interrupt_work = smu_v11_0_interrupt_work, 2545 .get_gpu_metrics = vangogh_common_get_gpu_metrics, 2546 .od_edit_dpm_table = vangogh_od_edit_dpm_table, 2547 .print_clk_levels = vangogh_common_print_clk_levels, 2548 .set_default_dpm_table = vangogh_set_default_dpm_tables, 2549 .set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters, 2550 .notify_rlc_state = vangogh_notify_rlc_state, 2551 .feature_is_enabled = smu_cmn_feature_is_enabled, 2552 .set_power_profile_mode = vangogh_set_power_profile_mode, 2553 .get_power_profile_mode = vangogh_get_power_profile_mode, 2554 .get_dpm_clock_table = vangogh_get_dpm_clock_table, 2555 .force_clk_levels = vangogh_force_clk_levels, 2556 .set_performance_level = vangogh_set_performance_level, 2557 .post_init = vangogh_post_smu_init, 2558 .mode2_reset = vangogh_mode2_reset, 2559 .gfx_off_control = smu_v11_0_gfx_off_control, 2560 .get_gfx_off_status = vangogh_get_gfxoff_status, 2561 .get_gfx_off_entrycount = vangogh_get_gfxoff_entrycount, 2562 .get_gfx_off_residency = vangogh_get_gfxoff_residency, 2563 .set_gfx_off_residency = vangogh_set_gfxoff_residency, 2564 .get_ppt_limit = vangogh_get_ppt_limit, 2565 .get_power_limit = vangogh_get_power_limit, 2566 .set_power_limit = vangogh_set_power_limit, 2567 .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values, 2568 }; 2569 2570 void vangogh_set_ppt_funcs(struct smu_context *smu) 2571 { 2572 smu->ppt_funcs = &vangogh_ppt_funcs; 2573 smu->message_map = vangogh_message_map; 2574 smu->feature_map = vangogh_feature_mask_map; 2575 smu->table_map = vangogh_table_map; 2576 smu->workload_map = vangogh_workload_map; 2577 smu->is_apu = true; 2578 smu_v11_0_set_smu_mailbox_registers(smu); 2579 } 2580