1 /* 2 * Copyright 2019 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #define SWSMU_CODE_LAYER_L2 25 26 #include "amdgpu.h" 27 #include "amdgpu_smu.h" 28 #include "smu_v12_0_ppsmc.h" 29 #include "smu12_driver_if.h" 30 #include "smu_v12_0.h" 31 #include "renoir_ppt.h" 32 #include "smu_cmn.h" 33 34 /* 35 * DO NOT use these for err/warn/info/debug messages. 36 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 37 * They are more MGPU friendly. 38 */ 39 #undef pr_err 40 #undef pr_warn 41 #undef pr_info 42 #undef pr_debug 43 44 #define mmMP1_SMN_C2PMSG_66 0x0282 45 #define mmMP1_SMN_C2PMSG_66_BASE_IDX 0 46 47 #define mmMP1_SMN_C2PMSG_82 0x0292 48 #define mmMP1_SMN_C2PMSG_82_BASE_IDX 0 49 50 #define mmMP1_SMN_C2PMSG_90 0x029a 51 #define mmMP1_SMN_C2PMSG_90_BASE_IDX 0 52 53 static struct cmn2asic_msg_mapping renoir_message_map[SMU_MSG_MAX_COUNT] = { 54 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), 55 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), 56 MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1), 57 MSG_MAP(PowerUpGfx, PPSMC_MSG_PowerUpGfx, 1), 58 MSG_MAP(AllowGfxOff, PPSMC_MSG_EnableGfxOff, 1), 59 MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisableGfxOff, 1), 60 MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 1), 61 MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 1), 62 MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1), 63 MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1), 64 MSG_MAP(PowerDownSdma, PPSMC_MSG_PowerDownSdma, 1), 65 MSG_MAP(PowerUpSdma, PPSMC_MSG_PowerUpSdma, 1), 66 MSG_MAP(SetHardMinIspclkByFreq, PPSMC_MSG_SetHardMinIspclkByFreq, 1), 67 MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1), 68 MSG_MAP(SetAllowFclkSwitch, PPSMC_MSG_SetAllowFclkSwitch, 1), 69 MSG_MAP(SetMinVideoGfxclkFreq, PPSMC_MSG_SetMinVideoGfxclkFreq, 1), 70 MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 1), 71 MSG_MAP(SetCustomPolicy, PPSMC_MSG_SetCustomPolicy, 1), 72 MSG_MAP(SetVideoFps, PPSMC_MSG_SetVideoFps, 1), 73 MSG_MAP(NumOfDisplays, PPSMC_MSG_SetDisplayCount, 1), 74 MSG_MAP(QueryPowerLimit, PPSMC_MSG_QueryPowerLimit, 1), 75 MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), 76 MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), 77 MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1), 78 MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 1), 79 MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 1), 80 MSG_MAP(SetGfxclkOverdriveByFreqVid, PPSMC_MSG_SetGfxclkOverdriveByFreqVid, 1), 81 MSG_MAP(SetHardMinDcfclkByFreq, PPSMC_MSG_SetHardMinDcfclkByFreq, 1), 82 MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 1), 83 MSG_MAP(ControlIgpuATS, PPSMC_MSG_ControlIgpuATS, 1), 84 MSG_MAP(SetMinVideoFclkFreq, PPSMC_MSG_SetMinVideoFclkFreq, 1), 85 MSG_MAP(SetMinDeepSleepDcfclk, PPSMC_MSG_SetMinDeepSleepDcfclk, 1), 86 MSG_MAP(ForcePowerDownGfx, PPSMC_MSG_ForcePowerDownGfx, 1), 87 MSG_MAP(SetPhyclkVoltageByFreq, PPSMC_MSG_SetPhyclkVoltageByFreq, 1), 88 MSG_MAP(SetDppclkVoltageByFreq, PPSMC_MSG_SetDppclkVoltageByFreq, 1), 89 MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 1), 90 MSG_MAP(EnablePostCode, PPSMC_MSG_EnablePostCode, 1), 91 MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency, 1), 92 MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency, 1), 93 MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxclkFrequency, 1), 94 MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxclkFrequency, 1), 95 MSG_MAP(SoftReset, PPSMC_MSG_SoftReset, 1), 96 MSG_MAP(SetGfxCGPG, PPSMC_MSG_SetGfxCGPG, 1), 97 MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1), 98 MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 1), 99 MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 1), 100 MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 1), 101 MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 1), 102 MSG_MAP(PowerGateMmHub, PPSMC_MSG_PowerGateMmHub, 1), 103 MSG_MAP(UpdatePmeRestore, PPSMC_MSG_UpdatePmeRestore, 1), 104 MSG_MAP(GpuChangeState, PPSMC_MSG_GpuChangeState, 1), 105 MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage, 1), 106 MSG_MAP(ForceGfxContentSave, PPSMC_MSG_ForceGfxContentSave, 1), 107 MSG_MAP(EnableTmdp48MHzRefclkPwrDown, PPSMC_MSG_EnableTmdp48MHzRefclkPwrDown, 1), 108 MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 1), 109 MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 1), 110 MSG_MAP(PowerGateAtHub, PPSMC_MSG_PowerGateAtHub, 1), 111 MSG_MAP(SetSoftMinJpeg, PPSMC_MSG_SetSoftMinJpeg, 1), 112 MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 1), 113 }; 114 115 static struct cmn2asic_mapping renoir_clk_map[SMU_CLK_COUNT] = { 116 CLK_MAP(GFXCLK, CLOCK_GFXCLK), 117 CLK_MAP(SCLK, CLOCK_GFXCLK), 118 CLK_MAP(SOCCLK, CLOCK_SOCCLK), 119 CLK_MAP(UCLK, CLOCK_FCLK), 120 CLK_MAP(MCLK, CLOCK_FCLK), 121 CLK_MAP(VCLK, CLOCK_VCLK), 122 CLK_MAP(DCLK, CLOCK_DCLK), 123 }; 124 125 static struct cmn2asic_mapping renoir_table_map[SMU_TABLE_COUNT] = { 126 TAB_MAP_VALID(WATERMARKS), 127 TAB_MAP_INVALID(CUSTOM_DPM), 128 TAB_MAP_VALID(DPMCLOCKS), 129 TAB_MAP_VALID(SMU_METRICS), 130 }; 131 132 static struct cmn2asic_mapping renoir_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { 133 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), 134 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), 135 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), 136 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), 137 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), 138 }; 139 140 static const uint8_t renoir_throttler_map[] = { 141 [THROTTLER_STATUS_BIT_SPL] = (SMU_THROTTLER_SPL_BIT), 142 [THROTTLER_STATUS_BIT_FPPT] = (SMU_THROTTLER_FPPT_BIT), 143 [THROTTLER_STATUS_BIT_SPPT] = (SMU_THROTTLER_SPPT_BIT), 144 [THROTTLER_STATUS_BIT_SPPT_APU] = (SMU_THROTTLER_SPPT_APU_BIT), 145 [THROTTLER_STATUS_BIT_THM_CORE] = (SMU_THROTTLER_TEMP_CORE_BIT), 146 [THROTTLER_STATUS_BIT_THM_GFX] = (SMU_THROTTLER_TEMP_GPU_BIT), 147 [THROTTLER_STATUS_BIT_THM_SOC] = (SMU_THROTTLER_TEMP_SOC_BIT), 148 [THROTTLER_STATUS_BIT_TDC_VDD] = (SMU_THROTTLER_TDC_VDD_BIT), 149 [THROTTLER_STATUS_BIT_TDC_SOC] = (SMU_THROTTLER_TDC_SOC_BIT), 150 [THROTTLER_STATUS_BIT_PROCHOT_CPU] = (SMU_THROTTLER_PROCHOT_CPU_BIT), 151 [THROTTLER_STATUS_BIT_PROCHOT_GFX] = (SMU_THROTTLER_PROCHOT_GFX_BIT), 152 [THROTTLER_STATUS_BIT_EDC_CPU] = (SMU_THROTTLER_EDC_CPU_BIT), 153 [THROTTLER_STATUS_BIT_EDC_GFX] = (SMU_THROTTLER_EDC_GFX_BIT), 154 }; 155 156 static int renoir_init_smc_tables(struct smu_context *smu) 157 { 158 struct smu_table_context *smu_table = &smu->smu_table; 159 struct smu_table *tables = smu_table->tables; 160 161 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), 162 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 163 SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t), 164 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 165 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), 166 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 167 168 smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL); 169 if (!smu_table->clocks_table) 170 goto err0_out; 171 172 smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); 173 if (!smu_table->metrics_table) 174 goto err1_out; 175 smu_table->metrics_time = 0; 176 177 smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); 178 if (!smu_table->watermarks_table) 179 goto err2_out; 180 181 smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2); 182 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 183 if (!smu_table->gpu_metrics_table) 184 goto err3_out; 185 186 return 0; 187 188 err3_out: 189 kfree(smu_table->watermarks_table); 190 err2_out: 191 kfree(smu_table->metrics_table); 192 err1_out: 193 kfree(smu_table->clocks_table); 194 err0_out: 195 return -ENOMEM; 196 } 197 198 /* 199 * This interface just for getting uclk ultimate freq and should't introduce 200 * other likewise function result in overmuch callback. 201 */ 202 static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type, 203 uint32_t dpm_level, uint32_t *freq) 204 { 205 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 206 207 if (!clk_table || clk_type >= SMU_CLK_COUNT) 208 return -EINVAL; 209 210 switch (clk_type) { 211 case SMU_SOCCLK: 212 if (dpm_level >= NUM_SOCCLK_DPM_LEVELS) 213 return -EINVAL; 214 *freq = clk_table->SocClocks[dpm_level].Freq; 215 break; 216 case SMU_UCLK: 217 case SMU_MCLK: 218 if (dpm_level >= NUM_FCLK_DPM_LEVELS) 219 return -EINVAL; 220 *freq = clk_table->FClocks[dpm_level].Freq; 221 break; 222 case SMU_DCEFCLK: 223 if (dpm_level >= NUM_DCFCLK_DPM_LEVELS) 224 return -EINVAL; 225 *freq = clk_table->DcfClocks[dpm_level].Freq; 226 break; 227 case SMU_FCLK: 228 if (dpm_level >= NUM_FCLK_DPM_LEVELS) 229 return -EINVAL; 230 *freq = clk_table->FClocks[dpm_level].Freq; 231 break; 232 case SMU_VCLK: 233 if (dpm_level >= NUM_VCN_DPM_LEVELS) 234 return -EINVAL; 235 *freq = clk_table->VClocks[dpm_level].Freq; 236 break; 237 case SMU_DCLK: 238 if (dpm_level >= NUM_VCN_DPM_LEVELS) 239 return -EINVAL; 240 *freq = clk_table->DClocks[dpm_level].Freq; 241 break; 242 243 default: 244 return -EINVAL; 245 } 246 247 return 0; 248 } 249 250 static int renoir_get_profiling_clk_mask(struct smu_context *smu, 251 enum amd_dpm_forced_level level, 252 uint32_t *sclk_mask, 253 uint32_t *mclk_mask, 254 uint32_t *soc_mask) 255 { 256 257 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { 258 if (sclk_mask) 259 *sclk_mask = 0; 260 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { 261 if (mclk_mask) 262 /* mclk levels are in reverse order */ 263 *mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1; 264 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 265 if (sclk_mask) 266 /* The sclk as gfxclk and has three level about max/min/current */ 267 *sclk_mask = 3 - 1; 268 269 if (mclk_mask) 270 /* mclk levels are in reverse order */ 271 *mclk_mask = 0; 272 273 if (soc_mask) 274 *soc_mask = NUM_SOCCLK_DPM_LEVELS - 1; 275 } 276 277 return 0; 278 } 279 280 static int renoir_get_dpm_ultimate_freq(struct smu_context *smu, 281 enum smu_clk_type clk_type, 282 uint32_t *min, 283 uint32_t *max) 284 { 285 int ret = 0; 286 uint32_t mclk_mask, soc_mask; 287 uint32_t clock_limit; 288 289 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) { 290 switch (clk_type) { 291 case SMU_MCLK: 292 case SMU_UCLK: 293 clock_limit = smu->smu_table.boot_values.uclk; 294 break; 295 case SMU_GFXCLK: 296 case SMU_SCLK: 297 clock_limit = smu->smu_table.boot_values.gfxclk; 298 break; 299 case SMU_SOCCLK: 300 clock_limit = smu->smu_table.boot_values.socclk; 301 break; 302 default: 303 clock_limit = 0; 304 break; 305 } 306 307 /* clock in Mhz unit */ 308 if (min) 309 *min = clock_limit / 100; 310 if (max) 311 *max = clock_limit / 100; 312 313 return 0; 314 } 315 316 if (max) { 317 ret = renoir_get_profiling_clk_mask(smu, 318 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK, 319 NULL, 320 &mclk_mask, 321 &soc_mask); 322 if (ret) 323 goto failed; 324 325 switch (clk_type) { 326 case SMU_GFXCLK: 327 case SMU_SCLK: 328 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency, max); 329 if (ret) { 330 dev_err(smu->adev->dev, "Attempt to get max GX frequency from SMC Failed !\n"); 331 goto failed; 332 } 333 break; 334 case SMU_UCLK: 335 case SMU_FCLK: 336 case SMU_MCLK: 337 ret = renoir_get_dpm_clk_limited(smu, clk_type, mclk_mask, max); 338 if (ret) 339 goto failed; 340 break; 341 case SMU_SOCCLK: 342 ret = renoir_get_dpm_clk_limited(smu, clk_type, soc_mask, max); 343 if (ret) 344 goto failed; 345 break; 346 default: 347 ret = -EINVAL; 348 goto failed; 349 } 350 } 351 352 if (min) { 353 switch (clk_type) { 354 case SMU_GFXCLK: 355 case SMU_SCLK: 356 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency, min); 357 if (ret) { 358 dev_err(smu->adev->dev, "Attempt to get min GX frequency from SMC Failed !\n"); 359 goto failed; 360 } 361 break; 362 case SMU_UCLK: 363 case SMU_FCLK: 364 case SMU_MCLK: 365 ret = renoir_get_dpm_clk_limited(smu, clk_type, NUM_MEMCLK_DPM_LEVELS - 1, min); 366 if (ret) 367 goto failed; 368 break; 369 case SMU_SOCCLK: 370 ret = renoir_get_dpm_clk_limited(smu, clk_type, 0, min); 371 if (ret) 372 goto failed; 373 break; 374 default: 375 ret = -EINVAL; 376 goto failed; 377 } 378 } 379 failed: 380 return ret; 381 } 382 383 static int renoir_od_edit_dpm_table(struct smu_context *smu, 384 enum PP_OD_DPM_TABLE_COMMAND type, 385 long input[], uint32_t size) 386 { 387 int ret = 0; 388 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 389 390 if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) { 391 dev_warn(smu->adev->dev, 392 "pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n"); 393 return -EINVAL; 394 } 395 396 switch (type) { 397 case PP_OD_EDIT_SCLK_VDDC_TABLE: 398 if (size != 2) { 399 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 400 return -EINVAL; 401 } 402 403 if (input[0] == 0) { 404 if (input[1] < smu->gfx_default_hard_min_freq) { 405 dev_warn(smu->adev->dev, 406 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n", 407 input[1], smu->gfx_default_hard_min_freq); 408 return -EINVAL; 409 } 410 smu->gfx_actual_hard_min_freq = input[1]; 411 } else if (input[0] == 1) { 412 if (input[1] > smu->gfx_default_soft_max_freq) { 413 dev_warn(smu->adev->dev, 414 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n", 415 input[1], smu->gfx_default_soft_max_freq); 416 return -EINVAL; 417 } 418 smu->gfx_actual_soft_max_freq = input[1]; 419 } else { 420 return -EINVAL; 421 } 422 break; 423 case PP_OD_RESTORE_DEFAULT_TABLE: 424 if (size != 0) { 425 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 426 return -EINVAL; 427 } 428 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 429 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 430 break; 431 case PP_OD_COMMIT_DPM_TABLE: 432 if (size != 0) { 433 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 434 return -EINVAL; 435 } else { 436 if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) { 437 dev_err(smu->adev->dev, 438 "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n", 439 smu->gfx_actual_hard_min_freq, 440 smu->gfx_actual_soft_max_freq); 441 return -EINVAL; 442 } 443 444 ret = smu_cmn_send_smc_msg_with_param(smu, 445 SMU_MSG_SetHardMinGfxClk, 446 smu->gfx_actual_hard_min_freq, 447 NULL); 448 if (ret) { 449 dev_err(smu->adev->dev, "Set hard min sclk failed!"); 450 return ret; 451 } 452 453 ret = smu_cmn_send_smc_msg_with_param(smu, 454 SMU_MSG_SetSoftMaxGfxClk, 455 smu->gfx_actual_soft_max_freq, 456 NULL); 457 if (ret) { 458 dev_err(smu->adev->dev, "Set soft max sclk failed!"); 459 return ret; 460 } 461 } 462 break; 463 default: 464 return -ENOSYS; 465 } 466 467 return ret; 468 } 469 470 static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) 471 { 472 uint32_t min = 0, max = 0; 473 uint32_t ret = 0; 474 475 ret = smu_cmn_send_smc_msg_with_param(smu, 476 SMU_MSG_GetMinGfxclkFrequency, 477 0, &min); 478 if (ret) 479 return ret; 480 ret = smu_cmn_send_smc_msg_with_param(smu, 481 SMU_MSG_GetMaxGfxclkFrequency, 482 0, &max); 483 if (ret) 484 return ret; 485 486 smu->gfx_default_hard_min_freq = min; 487 smu->gfx_default_soft_max_freq = max; 488 smu->gfx_actual_hard_min_freq = 0; 489 smu->gfx_actual_soft_max_freq = 0; 490 491 return 0; 492 } 493 494 static int renoir_print_clk_levels(struct smu_context *smu, 495 enum smu_clk_type clk_type, char *buf) 496 { 497 int i, idx, size = 0, ret = 0; 498 uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; 499 SmuMetrics_t metrics; 500 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 501 bool cur_value_match_level = false; 502 503 memset(&metrics, 0, sizeof(metrics)); 504 505 ret = smu_cmn_get_metrics_table(smu, &metrics, false); 506 if (ret) 507 return ret; 508 509 smu_cmn_get_sysfs_buf(&buf, &size); 510 511 switch (clk_type) { 512 case SMU_OD_RANGE: 513 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 514 ret = smu_cmn_send_smc_msg_with_param(smu, 515 SMU_MSG_GetMinGfxclkFrequency, 516 0, &min); 517 if (ret) 518 return ret; 519 ret = smu_cmn_send_smc_msg_with_param(smu, 520 SMU_MSG_GetMaxGfxclkFrequency, 521 0, &max); 522 if (ret) 523 return ret; 524 size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max); 525 } 526 break; 527 case SMU_OD_SCLK: 528 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 529 min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; 530 max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; 531 size += sysfs_emit_at(buf, size, "OD_SCLK\n"); 532 size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min); 533 size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max); 534 } 535 break; 536 case SMU_GFXCLK: 537 case SMU_SCLK: 538 /* retirve table returned paramters unit is MHz */ 539 cur_value = metrics.ClockFrequency[CLOCK_GFXCLK]; 540 ret = renoir_get_dpm_ultimate_freq(smu, SMU_GFXCLK, &min, &max); 541 if (!ret) { 542 /* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */ 543 if (cur_value == max) 544 i = 2; 545 else if (cur_value == min) 546 i = 0; 547 else 548 i = 1; 549 550 size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, 551 i == 0 ? "*" : ""); 552 size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", 553 i == 1 ? cur_value : RENOIR_UMD_PSTATE_GFXCLK, 554 i == 1 ? "*" : ""); 555 size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, 556 i == 2 ? "*" : ""); 557 } 558 return size; 559 case SMU_SOCCLK: 560 count = NUM_SOCCLK_DPM_LEVELS; 561 cur_value = metrics.ClockFrequency[CLOCK_SOCCLK]; 562 break; 563 case SMU_MCLK: 564 count = NUM_MEMCLK_DPM_LEVELS; 565 cur_value = metrics.ClockFrequency[CLOCK_FCLK]; 566 break; 567 case SMU_DCEFCLK: 568 count = NUM_DCFCLK_DPM_LEVELS; 569 cur_value = metrics.ClockFrequency[CLOCK_DCFCLK]; 570 break; 571 case SMU_FCLK: 572 count = NUM_FCLK_DPM_LEVELS; 573 cur_value = metrics.ClockFrequency[CLOCK_FCLK]; 574 break; 575 case SMU_VCLK: 576 count = NUM_VCN_DPM_LEVELS; 577 cur_value = metrics.ClockFrequency[CLOCK_VCLK]; 578 break; 579 case SMU_DCLK: 580 count = NUM_VCN_DPM_LEVELS; 581 cur_value = metrics.ClockFrequency[CLOCK_DCLK]; 582 break; 583 default: 584 break; 585 } 586 587 switch (clk_type) { 588 case SMU_SOCCLK: 589 case SMU_MCLK: 590 case SMU_DCEFCLK: 591 case SMU_FCLK: 592 case SMU_VCLK: 593 case SMU_DCLK: 594 for (i = 0; i < count; i++) { 595 idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; 596 ret = renoir_get_dpm_clk_limited(smu, clk_type, idx, &value); 597 if (ret) 598 return ret; 599 if (!value) 600 continue; 601 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value, 602 cur_value == value ? "*" : ""); 603 if (cur_value == value) 604 cur_value_match_level = true; 605 } 606 607 if (!cur_value_match_level) 608 size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value); 609 610 break; 611 default: 612 break; 613 } 614 615 return size; 616 } 617 618 static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context *smu) 619 { 620 enum amd_pm_state_type pm_type; 621 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 622 623 if (!smu_dpm_ctx->dpm_context || 624 !smu_dpm_ctx->dpm_current_power_state) 625 return -EINVAL; 626 627 switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) { 628 case SMU_STATE_UI_LABEL_BATTERY: 629 pm_type = POWER_STATE_TYPE_BATTERY; 630 break; 631 case SMU_STATE_UI_LABEL_BALLANCED: 632 pm_type = POWER_STATE_TYPE_BALANCED; 633 break; 634 case SMU_STATE_UI_LABEL_PERFORMANCE: 635 pm_type = POWER_STATE_TYPE_PERFORMANCE; 636 break; 637 default: 638 if (smu_dpm_ctx->dpm_current_power_state->classification.flags & SMU_STATE_CLASSIFICATION_FLAG_BOOT) 639 pm_type = POWER_STATE_TYPE_INTERNAL_BOOT; 640 else 641 pm_type = POWER_STATE_TYPE_DEFAULT; 642 break; 643 } 644 645 return pm_type; 646 } 647 648 static int renoir_dpm_set_vcn_enable(struct smu_context *smu, 649 bool enable, 650 int inst) 651 { 652 int ret = 0; 653 654 if (enable) { 655 /* vcn dpm on is a prerequisite for vcn power gate messages */ 656 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { 657 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL); 658 if (ret) 659 return ret; 660 } 661 } else { 662 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { 663 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL); 664 if (ret) 665 return ret; 666 } 667 } 668 669 return ret; 670 } 671 672 static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) 673 { 674 int ret = 0; 675 676 if (enable) { 677 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { 678 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL); 679 if (ret) 680 return ret; 681 } 682 } else { 683 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { 684 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL); 685 if (ret) 686 return ret; 687 } 688 } 689 690 return ret; 691 } 692 693 static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest) 694 { 695 int ret = 0, i = 0; 696 uint32_t min_freq, max_freq, force_freq; 697 enum smu_clk_type clk_type; 698 699 enum smu_clk_type clks[] = { 700 SMU_GFXCLK, 701 SMU_MCLK, 702 SMU_SOCCLK, 703 }; 704 705 for (i = 0; i < ARRAY_SIZE(clks); i++) { 706 clk_type = clks[i]; 707 ret = renoir_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq); 708 if (ret) 709 return ret; 710 711 force_freq = highest ? max_freq : min_freq; 712 ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq, false); 713 if (ret) 714 return ret; 715 } 716 717 return ret; 718 } 719 720 static int renoir_unforce_dpm_levels(struct smu_context *smu) { 721 722 int ret = 0, i = 0; 723 uint32_t min_freq, max_freq; 724 enum smu_clk_type clk_type; 725 726 struct clk_feature_map { 727 enum smu_clk_type clk_type; 728 uint32_t feature; 729 } clk_feature_map[] = { 730 {SMU_GFXCLK, SMU_FEATURE_DPM_GFXCLK_BIT}, 731 {SMU_MCLK, SMU_FEATURE_DPM_UCLK_BIT}, 732 {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT}, 733 }; 734 735 for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) { 736 if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature)) 737 continue; 738 739 clk_type = clk_feature_map[i].clk_type; 740 741 ret = renoir_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq); 742 if (ret) 743 return ret; 744 745 ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); 746 if (ret) 747 return ret; 748 } 749 750 return ret; 751 } 752 753 /* 754 * This interface get dpm clock table for dc 755 */ 756 static int renoir_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table) 757 { 758 DpmClocks_t *table = smu->smu_table.clocks_table; 759 int i; 760 761 if (!clock_table || !table) 762 return -EINVAL; 763 764 for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++) { 765 clock_table->DcfClocks[i].Freq = table->DcfClocks[i].Freq; 766 clock_table->DcfClocks[i].Vol = table->DcfClocks[i].Vol; 767 } 768 769 for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) { 770 clock_table->SocClocks[i].Freq = table->SocClocks[i].Freq; 771 clock_table->SocClocks[i].Vol = table->SocClocks[i].Vol; 772 } 773 774 for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) { 775 clock_table->FClocks[i].Freq = table->FClocks[i].Freq; 776 clock_table->FClocks[i].Vol = table->FClocks[i].Vol; 777 } 778 779 for (i = 0; i< NUM_MEMCLK_DPM_LEVELS; i++) { 780 clock_table->MemClocks[i].Freq = table->MemClocks[i].Freq; 781 clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol; 782 } 783 784 for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) { 785 clock_table->VClocks[i].Freq = table->VClocks[i].Freq; 786 clock_table->VClocks[i].Vol = table->VClocks[i].Vol; 787 } 788 789 for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) { 790 clock_table->DClocks[i].Freq = table->DClocks[i].Freq; 791 clock_table->DClocks[i].Vol = table->DClocks[i].Vol; 792 } 793 794 return 0; 795 } 796 797 static int renoir_force_clk_levels(struct smu_context *smu, 798 enum smu_clk_type clk_type, uint32_t mask) 799 { 800 801 int ret = 0 ; 802 uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0; 803 804 soft_min_level = mask ? (ffs(mask) - 1) : 0; 805 soft_max_level = mask ? (fls(mask) - 1) : 0; 806 807 switch (clk_type) { 808 case SMU_GFXCLK: 809 case SMU_SCLK: 810 if (soft_min_level > 2 || soft_max_level > 2) { 811 dev_info(smu->adev->dev, "Currently sclk only support 3 levels on APU\n"); 812 return -EINVAL; 813 } 814 815 ret = renoir_get_dpm_ultimate_freq(smu, SMU_GFXCLK, &min_freq, &max_freq); 816 if (ret) 817 return ret; 818 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, 819 soft_max_level == 0 ? min_freq : 820 soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq, 821 NULL); 822 if (ret) 823 return ret; 824 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, 825 soft_min_level == 2 ? max_freq : 826 soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq, 827 NULL); 828 if (ret) 829 return ret; 830 break; 831 case SMU_SOCCLK: 832 ret = renoir_get_dpm_clk_limited(smu, clk_type, soft_min_level, &min_freq); 833 if (ret) 834 return ret; 835 ret = renoir_get_dpm_clk_limited(smu, clk_type, soft_max_level, &max_freq); 836 if (ret) 837 return ret; 838 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq, NULL); 839 if (ret) 840 return ret; 841 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq, NULL); 842 if (ret) 843 return ret; 844 break; 845 case SMU_MCLK: 846 case SMU_FCLK: 847 ret = renoir_get_dpm_clk_limited(smu, clk_type, soft_min_level, &min_freq); 848 if (ret) 849 return ret; 850 ret = renoir_get_dpm_clk_limited(smu, clk_type, soft_max_level, &max_freq); 851 if (ret) 852 return ret; 853 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq, NULL); 854 if (ret) 855 return ret; 856 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq, NULL); 857 if (ret) 858 return ret; 859 break; 860 default: 861 break; 862 } 863 864 return ret; 865 } 866 867 static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) 868 { 869 int workload_type, ret; 870 uint32_t profile_mode = input[size]; 871 872 if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { 873 dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); 874 return -EINVAL; 875 } 876 877 if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT || 878 profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) 879 return 0; 880 881 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 882 workload_type = smu_cmn_to_asic_specific_index(smu, 883 CMN2ASIC_MAPPING_WORKLOAD, 884 profile_mode); 885 if (workload_type < 0) { 886 /* 887 * TODO: If some case need switch to powersave/default power mode 888 * then can consider enter WORKLOAD_COMPUTE/WORKLOAD_CUSTOM for power saving. 889 */ 890 dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on RENOIR\n", profile_mode); 891 return -EINVAL; 892 } 893 894 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, 895 smu->workload_mask, 896 NULL); 897 if (ret) { 898 dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type); 899 return ret; 900 } 901 902 smu_cmn_assign_power_profile(smu); 903 904 return 0; 905 } 906 907 static int renoir_set_peak_clock_by_device(struct smu_context *smu) 908 { 909 int ret = 0; 910 uint32_t sclk_freq = 0, uclk_freq = 0; 911 912 ret = renoir_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_freq); 913 if (ret) 914 return ret; 915 916 ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false); 917 if (ret) 918 return ret; 919 920 ret = renoir_get_dpm_ultimate_freq(smu, SMU_UCLK, NULL, &uclk_freq); 921 if (ret) 922 return ret; 923 924 ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false); 925 if (ret) 926 return ret; 927 928 return ret; 929 } 930 931 static int renior_set_dpm_profile_freq(struct smu_context *smu, 932 enum amd_dpm_forced_level level, 933 enum smu_clk_type clk_type) 934 { 935 int ret = 0; 936 uint32_t sclk = 0, socclk = 0, fclk = 0; 937 938 switch (clk_type) { 939 case SMU_GFXCLK: 940 case SMU_SCLK: 941 sclk = RENOIR_UMD_PSTATE_GFXCLK; 942 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 943 renoir_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk); 944 else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) 945 renoir_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk, NULL); 946 break; 947 case SMU_SOCCLK: 948 socclk = RENOIR_UMD_PSTATE_SOCCLK; 949 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 950 renoir_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk); 951 break; 952 case SMU_FCLK: 953 case SMU_MCLK: 954 fclk = RENOIR_UMD_PSTATE_FCLK; 955 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 956 renoir_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk); 957 else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) 958 renoir_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk, NULL); 959 break; 960 default: 961 ret = -EINVAL; 962 break; 963 } 964 965 if (sclk) 966 ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk, sclk, false); 967 968 if (socclk) 969 ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk, socclk, false); 970 971 if (fclk) 972 ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk, fclk, false); 973 974 return ret; 975 } 976 977 static int renoir_set_performance_level(struct smu_context *smu, 978 enum amd_dpm_forced_level level) 979 { 980 int ret = 0; 981 982 switch (level) { 983 case AMD_DPM_FORCED_LEVEL_HIGH: 984 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 985 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 986 987 ret = renoir_force_dpm_limit_value(smu, true); 988 break; 989 case AMD_DPM_FORCED_LEVEL_LOW: 990 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 991 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 992 993 ret = renoir_force_dpm_limit_value(smu, false); 994 break; 995 case AMD_DPM_FORCED_LEVEL_AUTO: 996 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 997 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 998 999 ret = renoir_unforce_dpm_levels(smu); 1000 break; 1001 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 1002 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1003 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 1004 1005 ret = smu_cmn_send_smc_msg_with_param(smu, 1006 SMU_MSG_SetHardMinGfxClk, 1007 RENOIR_UMD_PSTATE_GFXCLK, 1008 NULL); 1009 if (ret) 1010 return ret; 1011 ret = smu_cmn_send_smc_msg_with_param(smu, 1012 SMU_MSG_SetHardMinFclkByFreq, 1013 RENOIR_UMD_PSTATE_FCLK, 1014 NULL); 1015 if (ret) 1016 return ret; 1017 ret = smu_cmn_send_smc_msg_with_param(smu, 1018 SMU_MSG_SetHardMinSocclkByFreq, 1019 RENOIR_UMD_PSTATE_SOCCLK, 1020 NULL); 1021 if (ret) 1022 return ret; 1023 ret = smu_cmn_send_smc_msg_with_param(smu, 1024 SMU_MSG_SetHardMinVcn, 1025 RENOIR_UMD_PSTATE_VCNCLK, 1026 NULL); 1027 if (ret) 1028 return ret; 1029 1030 ret = smu_cmn_send_smc_msg_with_param(smu, 1031 SMU_MSG_SetSoftMaxGfxClk, 1032 RENOIR_UMD_PSTATE_GFXCLK, 1033 NULL); 1034 if (ret) 1035 return ret; 1036 ret = smu_cmn_send_smc_msg_with_param(smu, 1037 SMU_MSG_SetSoftMaxFclkByFreq, 1038 RENOIR_UMD_PSTATE_FCLK, 1039 NULL); 1040 if (ret) 1041 return ret; 1042 ret = smu_cmn_send_smc_msg_with_param(smu, 1043 SMU_MSG_SetSoftMaxSocclkByFreq, 1044 RENOIR_UMD_PSTATE_SOCCLK, 1045 NULL); 1046 if (ret) 1047 return ret; 1048 ret = smu_cmn_send_smc_msg_with_param(smu, 1049 SMU_MSG_SetSoftMaxVcn, 1050 RENOIR_UMD_PSTATE_VCNCLK, 1051 NULL); 1052 if (ret) 1053 return ret; 1054 break; 1055 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 1056 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 1057 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1058 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 1059 1060 renior_set_dpm_profile_freq(smu, level, SMU_SCLK); 1061 renior_set_dpm_profile_freq(smu, level, SMU_MCLK); 1062 renior_set_dpm_profile_freq(smu, level, SMU_SOCCLK); 1063 break; 1064 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1065 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1066 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 1067 1068 ret = renoir_set_peak_clock_by_device(smu); 1069 break; 1070 case AMD_DPM_FORCED_LEVEL_MANUAL: 1071 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 1072 default: 1073 break; 1074 } 1075 return ret; 1076 } 1077 1078 /* save watermark settings into pplib smu structure, 1079 * also pass data to smu controller 1080 */ 1081 static int renoir_set_watermarks_table( 1082 struct smu_context *smu, 1083 struct pp_smu_wm_range_sets *clock_ranges) 1084 { 1085 Watermarks_t *table = smu->smu_table.watermarks_table; 1086 int ret = 0; 1087 int i; 1088 1089 if (clock_ranges) { 1090 if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES || 1091 clock_ranges->num_writer_wm_sets > NUM_WM_RANGES) 1092 return -EINVAL; 1093 1094 /* save into smu->smu_table.tables[SMU_TABLE_WATERMARKS]->cpu_addr*/ 1095 for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) { 1096 table->WatermarkRow[WM_DCFCLK][i].MinClock = 1097 clock_ranges->reader_wm_sets[i].min_drain_clk_mhz; 1098 table->WatermarkRow[WM_DCFCLK][i].MaxClock = 1099 clock_ranges->reader_wm_sets[i].max_drain_clk_mhz; 1100 table->WatermarkRow[WM_DCFCLK][i].MinMclk = 1101 clock_ranges->reader_wm_sets[i].min_fill_clk_mhz; 1102 table->WatermarkRow[WM_DCFCLK][i].MaxMclk = 1103 clock_ranges->reader_wm_sets[i].max_fill_clk_mhz; 1104 1105 table->WatermarkRow[WM_DCFCLK][i].WmSetting = 1106 clock_ranges->reader_wm_sets[i].wm_inst; 1107 table->WatermarkRow[WM_DCFCLK][i].WmType = 1108 clock_ranges->reader_wm_sets[i].wm_type; 1109 } 1110 1111 for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) { 1112 table->WatermarkRow[WM_SOCCLK][i].MinClock = 1113 clock_ranges->writer_wm_sets[i].min_fill_clk_mhz; 1114 table->WatermarkRow[WM_SOCCLK][i].MaxClock = 1115 clock_ranges->writer_wm_sets[i].max_fill_clk_mhz; 1116 table->WatermarkRow[WM_SOCCLK][i].MinMclk = 1117 clock_ranges->writer_wm_sets[i].min_drain_clk_mhz; 1118 table->WatermarkRow[WM_SOCCLK][i].MaxMclk = 1119 clock_ranges->writer_wm_sets[i].max_drain_clk_mhz; 1120 1121 table->WatermarkRow[WM_SOCCLK][i].WmSetting = 1122 clock_ranges->writer_wm_sets[i].wm_inst; 1123 table->WatermarkRow[WM_SOCCLK][i].WmType = 1124 clock_ranges->writer_wm_sets[i].wm_type; 1125 } 1126 1127 smu->watermarks_bitmap |= WATERMARKS_EXIST; 1128 } 1129 1130 /* pass data to smu controller */ 1131 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && 1132 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { 1133 ret = smu_cmn_write_watermarks_table(smu); 1134 if (ret) { 1135 dev_err(smu->adev->dev, "Failed to update WMTABLE!"); 1136 return ret; 1137 } 1138 smu->watermarks_bitmap |= WATERMARKS_LOADED; 1139 } 1140 1141 return 0; 1142 } 1143 1144 static int renoir_get_power_profile_mode(struct smu_context *smu, 1145 char *buf) 1146 { 1147 uint32_t i, size = 0; 1148 int16_t workload_type = 0; 1149 1150 if (!buf) 1151 return -EINVAL; 1152 1153 for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { 1154 /* 1155 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT 1156 * Not all profile modes are supported on arcturus. 1157 */ 1158 workload_type = smu_cmn_to_asic_specific_index(smu, 1159 CMN2ASIC_MAPPING_WORKLOAD, 1160 i); 1161 if (workload_type < 0) 1162 continue; 1163 1164 size += sysfs_emit_at(buf, size, "%2d %14s%s\n", 1165 i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); 1166 } 1167 1168 return size; 1169 } 1170 1171 static void renoir_get_ss_power_percent(SmuMetrics_t *metrics, 1172 uint32_t *apu_percent, uint32_t *dgpu_percent) 1173 { 1174 uint32_t apu_boost = 0; 1175 uint32_t dgpu_boost = 0; 1176 uint16_t apu_limit = 0; 1177 uint16_t dgpu_limit = 0; 1178 uint16_t apu_power = 0; 1179 uint16_t dgpu_power = 0; 1180 1181 apu_power = metrics->ApuPower; 1182 apu_limit = metrics->StapmOriginalLimit; 1183 if (apu_power > apu_limit && apu_limit != 0) 1184 apu_boost = ((apu_power - apu_limit) * 100) / apu_limit; 1185 apu_boost = (apu_boost > 100) ? 100 : apu_boost; 1186 1187 dgpu_power = metrics->dGpuPower; 1188 if (metrics->StapmCurrentLimit > metrics->StapmOriginalLimit) 1189 dgpu_limit = metrics->StapmCurrentLimit - metrics->StapmOriginalLimit; 1190 if (dgpu_power > dgpu_limit && dgpu_limit != 0) 1191 dgpu_boost = ((dgpu_power - dgpu_limit) * 100) / dgpu_limit; 1192 dgpu_boost = (dgpu_boost > 100) ? 100 : dgpu_boost; 1193 1194 if (dgpu_boost >= apu_boost) 1195 apu_boost = 0; 1196 else 1197 dgpu_boost = 0; 1198 1199 *apu_percent = apu_boost; 1200 *dgpu_percent = dgpu_boost; 1201 } 1202 1203 1204 static int renoir_get_smu_metrics_data(struct smu_context *smu, 1205 MetricsMember_t member, 1206 uint32_t *value) 1207 { 1208 struct smu_table_context *smu_table = &smu->smu_table; 1209 1210 SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; 1211 int ret = 0; 1212 uint32_t apu_percent = 0; 1213 uint32_t dgpu_percent = 0; 1214 struct amdgpu_device *adev = smu->adev; 1215 1216 1217 ret = smu_cmn_get_metrics_table(smu, 1218 NULL, 1219 false); 1220 if (ret) 1221 return ret; 1222 1223 switch (member) { 1224 case METRICS_AVERAGE_GFXCLK: 1225 *value = metrics->ClockFrequency[CLOCK_GFXCLK]; 1226 break; 1227 case METRICS_AVERAGE_SOCCLK: 1228 *value = metrics->ClockFrequency[CLOCK_SOCCLK]; 1229 break; 1230 case METRICS_AVERAGE_UCLK: 1231 *value = metrics->ClockFrequency[CLOCK_FCLK]; 1232 break; 1233 case METRICS_AVERAGE_GFXACTIVITY: 1234 *value = metrics->AverageGfxActivity / 100; 1235 break; 1236 case METRICS_AVERAGE_VCNACTIVITY: 1237 *value = metrics->AverageUvdActivity / 100; 1238 break; 1239 case METRICS_CURR_SOCKETPOWER: 1240 if (((amdgpu_ip_version(adev, MP1_HWIP, 0) == 1241 IP_VERSION(12, 0, 1)) && 1242 (adev->pm.fw_version >= 0x40000f)) || 1243 ((amdgpu_ip_version(adev, MP1_HWIP, 0) == 1244 IP_VERSION(12, 0, 0)) && 1245 (adev->pm.fw_version >= 0x373200))) 1246 *value = metrics->CurrentSocketPower << 8; 1247 else 1248 *value = (metrics->CurrentSocketPower << 8) / 1000; 1249 break; 1250 case METRICS_TEMPERATURE_EDGE: 1251 *value = (metrics->GfxTemperature / 100) * 1252 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1253 break; 1254 case METRICS_TEMPERATURE_HOTSPOT: 1255 *value = (metrics->SocTemperature / 100) * 1256 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1257 break; 1258 case METRICS_THROTTLER_STATUS: 1259 *value = metrics->ThrottlerStatus; 1260 break; 1261 case METRICS_VOLTAGE_VDDGFX: 1262 *value = metrics->Voltage[0]; 1263 break; 1264 case METRICS_VOLTAGE_VDDSOC: 1265 *value = metrics->Voltage[1]; 1266 break; 1267 case METRICS_SS_APU_SHARE: 1268 /* return the percentage of APU power boost 1269 * with respect to APU's power limit. 1270 */ 1271 renoir_get_ss_power_percent(metrics, &apu_percent, &dgpu_percent); 1272 *value = apu_percent; 1273 break; 1274 case METRICS_SS_DGPU_SHARE: 1275 /* return the percentage of dGPU power boost 1276 * with respect to dGPU's power limit. 1277 */ 1278 renoir_get_ss_power_percent(metrics, &apu_percent, &dgpu_percent); 1279 *value = dgpu_percent; 1280 break; 1281 default: 1282 *value = UINT_MAX; 1283 break; 1284 } 1285 1286 return ret; 1287 } 1288 1289 static int renoir_read_sensor(struct smu_context *smu, 1290 enum amd_pp_sensors sensor, 1291 void *data, uint32_t *size) 1292 { 1293 int ret = 0; 1294 1295 if (!data || !size) 1296 return -EINVAL; 1297 1298 switch (sensor) { 1299 case AMDGPU_PP_SENSOR_GPU_LOAD: 1300 ret = renoir_get_smu_metrics_data(smu, 1301 METRICS_AVERAGE_GFXACTIVITY, 1302 (uint32_t *)data); 1303 *size = 4; 1304 break; 1305 case AMDGPU_PP_SENSOR_EDGE_TEMP: 1306 ret = renoir_get_smu_metrics_data(smu, 1307 METRICS_TEMPERATURE_EDGE, 1308 (uint32_t *)data); 1309 *size = 4; 1310 break; 1311 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 1312 ret = renoir_get_smu_metrics_data(smu, 1313 METRICS_TEMPERATURE_HOTSPOT, 1314 (uint32_t *)data); 1315 *size = 4; 1316 break; 1317 case AMDGPU_PP_SENSOR_GFX_MCLK: 1318 ret = renoir_get_smu_metrics_data(smu, 1319 METRICS_AVERAGE_UCLK, 1320 (uint32_t *)data); 1321 *(uint32_t *)data *= 100; 1322 *size = 4; 1323 break; 1324 case AMDGPU_PP_SENSOR_GFX_SCLK: 1325 ret = renoir_get_smu_metrics_data(smu, 1326 METRICS_AVERAGE_GFXCLK, 1327 (uint32_t *)data); 1328 *(uint32_t *)data *= 100; 1329 *size = 4; 1330 break; 1331 case AMDGPU_PP_SENSOR_VDDGFX: 1332 ret = renoir_get_smu_metrics_data(smu, 1333 METRICS_VOLTAGE_VDDGFX, 1334 (uint32_t *)data); 1335 *size = 4; 1336 break; 1337 case AMDGPU_PP_SENSOR_VDDNB: 1338 ret = renoir_get_smu_metrics_data(smu, 1339 METRICS_VOLTAGE_VDDSOC, 1340 (uint32_t *)data); 1341 *size = 4; 1342 break; 1343 case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: 1344 ret = renoir_get_smu_metrics_data(smu, 1345 METRICS_CURR_SOCKETPOWER, 1346 (uint32_t *)data); 1347 *size = 4; 1348 break; 1349 case AMDGPU_PP_SENSOR_SS_APU_SHARE: 1350 ret = renoir_get_smu_metrics_data(smu, 1351 METRICS_SS_APU_SHARE, 1352 (uint32_t *)data); 1353 *size = 4; 1354 break; 1355 case AMDGPU_PP_SENSOR_SS_DGPU_SHARE: 1356 ret = renoir_get_smu_metrics_data(smu, 1357 METRICS_SS_DGPU_SHARE, 1358 (uint32_t *)data); 1359 *size = 4; 1360 break; 1361 case AMDGPU_PP_SENSOR_GPU_AVG_POWER: 1362 default: 1363 ret = -EOPNOTSUPP; 1364 break; 1365 } 1366 1367 return ret; 1368 } 1369 1370 static bool renoir_is_dpm_running(struct smu_context *smu) 1371 { 1372 struct amdgpu_device *adev = smu->adev; 1373 1374 /* 1375 * Until now, the pmfw hasn't exported the interface of SMU 1376 * feature mask to APU SKU so just force on all the feature 1377 * at early initial stage. 1378 */ 1379 if (adev->in_suspend) 1380 return false; 1381 else 1382 return true; 1383 1384 } 1385 1386 static ssize_t renoir_get_gpu_metrics(struct smu_context *smu, 1387 void **table) 1388 { 1389 struct smu_table_context *smu_table = &smu->smu_table; 1390 struct gpu_metrics_v2_2 *gpu_metrics = 1391 (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table; 1392 SmuMetrics_t metrics; 1393 int ret = 0; 1394 1395 ret = smu_cmn_get_metrics_table(smu, &metrics, true); 1396 if (ret) 1397 return ret; 1398 1399 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2); 1400 1401 gpu_metrics->temperature_gfx = metrics.GfxTemperature; 1402 gpu_metrics->temperature_soc = metrics.SocTemperature; 1403 memcpy(&gpu_metrics->temperature_core[0], 1404 &metrics.CoreTemperature[0], 1405 sizeof(uint16_t) * 8); 1406 gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0]; 1407 gpu_metrics->temperature_l3[1] = metrics.L3Temperature[1]; 1408 1409 gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity; 1410 gpu_metrics->average_mm_activity = metrics.AverageUvdActivity; 1411 1412 gpu_metrics->average_socket_power = metrics.CurrentSocketPower; 1413 gpu_metrics->average_cpu_power = metrics.Power[0]; 1414 gpu_metrics->average_soc_power = metrics.Power[1]; 1415 memcpy(&gpu_metrics->average_core_power[0], 1416 &metrics.CorePower[0], 1417 sizeof(uint16_t) * 8); 1418 1419 gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency; 1420 gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency; 1421 gpu_metrics->average_fclk_frequency = metrics.AverageFclkFrequency; 1422 gpu_metrics->average_vclk_frequency = metrics.AverageVclkFrequency; 1423 1424 gpu_metrics->current_gfxclk = metrics.ClockFrequency[CLOCK_GFXCLK]; 1425 gpu_metrics->current_socclk = metrics.ClockFrequency[CLOCK_SOCCLK]; 1426 gpu_metrics->current_uclk = metrics.ClockFrequency[CLOCK_UMCCLK]; 1427 gpu_metrics->current_fclk = metrics.ClockFrequency[CLOCK_FCLK]; 1428 gpu_metrics->current_vclk = metrics.ClockFrequency[CLOCK_VCLK]; 1429 gpu_metrics->current_dclk = metrics.ClockFrequency[CLOCK_DCLK]; 1430 memcpy(&gpu_metrics->current_coreclk[0], 1431 &metrics.CoreFrequency[0], 1432 sizeof(uint16_t) * 8); 1433 gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0]; 1434 gpu_metrics->current_l3clk[1] = metrics.L3Frequency[1]; 1435 1436 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 1437 gpu_metrics->indep_throttle_status = 1438 smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus, 1439 renoir_throttler_map); 1440 1441 gpu_metrics->fan_pwm = metrics.FanPwm; 1442 1443 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1444 1445 *table = (void *)gpu_metrics; 1446 1447 return sizeof(struct gpu_metrics_v2_2); 1448 } 1449 1450 static int renoir_gfx_state_change_set(struct smu_context *smu, uint32_t state) 1451 { 1452 1453 return 0; 1454 } 1455 1456 static int renoir_get_enabled_mask(struct smu_context *smu, 1457 uint64_t *feature_mask) 1458 { 1459 if (!feature_mask) 1460 return -EINVAL; 1461 memset(feature_mask, 0xff, sizeof(*feature_mask)); 1462 1463 return 0; 1464 } 1465 1466 static const struct pptable_funcs renoir_ppt_funcs = { 1467 .set_power_state = NULL, 1468 .print_clk_levels = renoir_print_clk_levels, 1469 .get_current_power_state = renoir_get_current_power_state, 1470 .dpm_set_vcn_enable = renoir_dpm_set_vcn_enable, 1471 .dpm_set_jpeg_enable = renoir_dpm_set_jpeg_enable, 1472 .force_clk_levels = renoir_force_clk_levels, 1473 .set_power_profile_mode = renoir_set_power_profile_mode, 1474 .set_performance_level = renoir_set_performance_level, 1475 .get_dpm_clock_table = renoir_get_dpm_clock_table, 1476 .set_watermarks_table = renoir_set_watermarks_table, 1477 .get_power_profile_mode = renoir_get_power_profile_mode, 1478 .read_sensor = renoir_read_sensor, 1479 .check_fw_status = smu_v12_0_check_fw_status, 1480 .check_fw_version = smu_v12_0_check_fw_version, 1481 .powergate_sdma = smu_v12_0_powergate_sdma, 1482 .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, 1483 .send_smc_msg = smu_cmn_send_smc_msg, 1484 .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg, 1485 .gfx_off_control = smu_v12_0_gfx_off_control, 1486 .get_gfx_off_status = smu_v12_0_get_gfxoff_status, 1487 .init_smc_tables = renoir_init_smc_tables, 1488 .fini_smc_tables = smu_v12_0_fini_smc_tables, 1489 .set_default_dpm_table = smu_v12_0_set_default_dpm_tables, 1490 .get_enabled_mask = renoir_get_enabled_mask, 1491 .feature_is_enabled = smu_cmn_feature_is_enabled, 1492 .disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception, 1493 .get_dpm_ultimate_freq = renoir_get_dpm_ultimate_freq, 1494 .mode2_reset = smu_v12_0_mode2_reset, 1495 .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range, 1496 .set_driver_table_location = smu_v12_0_set_driver_table_location, 1497 .is_dpm_running = renoir_is_dpm_running, 1498 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 1499 .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, 1500 .get_gpu_metrics = renoir_get_gpu_metrics, 1501 .gfx_state_change_set = renoir_gfx_state_change_set, 1502 .set_fine_grain_gfx_freq_parameters = renoir_set_fine_grain_gfx_freq_parameters, 1503 .od_edit_dpm_table = renoir_od_edit_dpm_table, 1504 .get_vbios_bootup_values = smu_v12_0_get_vbios_bootup_values, 1505 }; 1506 1507 void renoir_set_ppt_funcs(struct smu_context *smu) 1508 { 1509 struct amdgpu_device *adev = smu->adev; 1510 1511 smu->ppt_funcs = &renoir_ppt_funcs; 1512 smu->message_map = renoir_message_map; 1513 smu->clock_map = renoir_clk_map; 1514 smu->table_map = renoir_table_map; 1515 smu->workload_map = renoir_workload_map; 1516 smu->smc_driver_if_version = SMU12_DRIVER_IF_VERSION; 1517 smu->is_apu = true; 1518 smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82); 1519 smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66); 1520 smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); 1521 } 1522