1 /*
2 * Copyright 2023 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #define SWSMU_CODE_LAYER_L2
25
26 #include <linux/firmware.h>
27 #include <linux/pci.h>
28 #include <linux/i2c.h>
29 #include "amdgpu.h"
30 #include "amdgpu_smu.h"
31 #include "atomfirmware.h"
32 #include "amdgpu_atomfirmware.h"
33 #include "amdgpu_atombios.h"
34 #include "smu_v14_0.h"
35 #include "smu14_driver_if_v14_0.h"
36 #include "soc15_common.h"
37 #include "atom.h"
38 #include "smu_v14_0_2_ppt.h"
39 #include "smu_v14_0_2_pptable.h"
40 #include "smu_v14_0_2_ppsmc.h"
41 #include "mp/mp_14_0_2_offset.h"
42 #include "mp/mp_14_0_2_sh_mask.h"
43
44 #include "smu_cmn.h"
45 #include "amdgpu_ras.h"
46
47 /*
48 * DO NOT use these for err/warn/info/debug messages.
49 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
50 * They are more MGPU friendly.
51 */
52 #undef pr_err
53 #undef pr_warn
54 #undef pr_info
55 #undef pr_debug
56
57 #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
58
59 #define FEATURE_MASK(feature) (1ULL << feature)
60 #define SMC_DPM_FEATURE ( \
61 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
62 FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
63 FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
64 FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
65 FEATURE_MASK(FEATURE_DPM_FCLK_BIT))
66
67 #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000
68 #define DEBUGSMC_MSG_Mode1Reset 2
69 #define LINK_SPEED_MAX 3
70
71 #define PP_OD_FEATURE_GFXCLK_FMIN 0
72 #define PP_OD_FEATURE_GFXCLK_FMAX 1
73 #define PP_OD_FEATURE_UCLK_FMIN 2
74 #define PP_OD_FEATURE_UCLK_FMAX 3
75 #define PP_OD_FEATURE_GFX_VF_CURVE 4
76 #define PP_OD_FEATURE_FAN_CURVE_TEMP 5
77 #define PP_OD_FEATURE_FAN_CURVE_PWM 6
78 #define PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT 7
79 #define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8
80 #define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9
81 #define PP_OD_FEATURE_FAN_MINIMUM_PWM 10
82
/*
 * Driver message -> PPSMC opcode mapping.
 * The third MSG_MAP field flags the message as usable (1) or not (0)
 * in SR-IOV VF mode — presumably; confirm against MSG_MAP in smu_cmn.h.
 */
static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage,			1),
	MSG_MAP(GetSmuVersion,			PPSMC_MSG_GetSmuVersion,		1),
	MSG_MAP(GetDriverIfVersion,		PPSMC_MSG_GetDriverIfVersion,		1),
	MSG_MAP(SetAllowedFeaturesMaskLow,	PPSMC_MSG_SetAllowedFeaturesMaskLow,	0),
	MSG_MAP(SetAllowedFeaturesMaskHigh,	PPSMC_MSG_SetAllowedFeaturesMaskHigh,	0),
	MSG_MAP(EnableAllSmuFeatures,		PPSMC_MSG_EnableAllSmuFeatures,		0),
	MSG_MAP(DisableAllSmuFeatures,		PPSMC_MSG_DisableAllSmuFeatures,	0),
	MSG_MAP(EnableSmuFeaturesLow,		PPSMC_MSG_EnableSmuFeaturesLow,		1),
	MSG_MAP(EnableSmuFeaturesHigh,		PPSMC_MSG_EnableSmuFeaturesHigh,	1),
	MSG_MAP(DisableSmuFeaturesLow,		PPSMC_MSG_DisableSmuFeaturesLow,	1),
	MSG_MAP(DisableSmuFeaturesHigh,		PPSMC_MSG_DisableSmuFeaturesHigh,	1),
	/* "Enabled" on the driver side maps to "Running" on the PMFW side */
	MSG_MAP(GetEnabledSmuFeaturesLow,	PPSMC_MSG_GetRunningSmuFeaturesLow,	1),
	MSG_MAP(GetEnabledSmuFeaturesHigh,	PPSMC_MSG_GetRunningSmuFeaturesHigh,	1),
	MSG_MAP(SetWorkloadMask,		PPSMC_MSG_SetWorkloadMask,		1),
	MSG_MAP(SetPptLimit,			PPSMC_MSG_SetPptLimit,			0),
	MSG_MAP(SetDriverDramAddrHigh,		PPSMC_MSG_SetDriverDramAddrHigh,	1),
	MSG_MAP(SetDriverDramAddrLow,		PPSMC_MSG_SetDriverDramAddrLow,		1),
	MSG_MAP(SetToolsDramAddrHigh,		PPSMC_MSG_SetToolsDramAddrHigh,		0),
	MSG_MAP(SetToolsDramAddrLow,		PPSMC_MSG_SetToolsDramAddrLow,		0),
	MSG_MAP(TransferTableSmu2Dram,		PPSMC_MSG_TransferTableSmu2Dram,	1),
	MSG_MAP(TransferTableDram2Smu,		PPSMC_MSG_TransferTableDram2Smu,	0),
	MSG_MAP(UseDefaultPPTable,		PPSMC_MSG_UseDefaultPPTable,		0),
	MSG_MAP(RunDcBtc,			PPSMC_MSG_RunDcBtc,			0),
	MSG_MAP(EnterBaco,			PPSMC_MSG_EnterBaco,			0),
	MSG_MAP(ExitBaco,			PPSMC_MSG_ExitBaco,			0),
	MSG_MAP(SetSoftMinByFreq,		PPSMC_MSG_SetSoftMinByFreq,		1),
	MSG_MAP(SetSoftMaxByFreq,		PPSMC_MSG_SetSoftMaxByFreq,		1),
	MSG_MAP(SetHardMinByFreq,		PPSMC_MSG_SetHardMinByFreq,		1),
	MSG_MAP(SetHardMaxByFreq,		PPSMC_MSG_SetHardMaxByFreq,		0),
	MSG_MAP(GetMinDpmFreq,			PPSMC_MSG_GetMinDpmFreq,		1),
	MSG_MAP(GetMaxDpmFreq,			PPSMC_MSG_GetMaxDpmFreq,		1),
	MSG_MAP(GetDpmFreqByIndex,		PPSMC_MSG_GetDpmFreqByIndex,		1),
	MSG_MAP(PowerUpVcn,			PPSMC_MSG_PowerUpVcn,			0),
	MSG_MAP(PowerDownVcn,			PPSMC_MSG_PowerDownVcn,			0),
	MSG_MAP(PowerUpJpeg,			PPSMC_MSG_PowerUpJpeg,			0),
	MSG_MAP(PowerDownJpeg,			PPSMC_MSG_PowerDownJpeg,		0),
	MSG_MAP(GetDcModeMaxDpmFreq,		PPSMC_MSG_GetDcModeMaxDpmFreq,		1),
	MSG_MAP(OverridePcieParameters,		PPSMC_MSG_OverridePcieParameters,	0),
	MSG_MAP(DramLogSetDramAddrHigh,		PPSMC_MSG_DramLogSetDramAddrHigh,	0),
	MSG_MAP(DramLogSetDramAddrLow,		PPSMC_MSG_DramLogSetDramAddrLow,	0),
	MSG_MAP(DramLogSetDramSize,		PPSMC_MSG_DramLogSetDramSize,		0),
	MSG_MAP(AllowGfxOff,			PPSMC_MSG_AllowGfxOff,			0),
	MSG_MAP(DisallowGfxOff,			PPSMC_MSG_DisallowGfxOff,		0),
	MSG_MAP(SetMGpuFanBoostLimitRpm,	PPSMC_MSG_SetMGpuFanBoostLimitRpm,	0),
	MSG_MAP(GetPptLimit,			PPSMC_MSG_GetPptLimit,			0),
	MSG_MAP(NotifyPowerSource,		PPSMC_MSG_NotifyPowerSource,		0),
	MSG_MAP(PrepareMp1ForUnload,		PPSMC_MSG_PrepareMp1ForUnload,		0),
	MSG_MAP(DFCstateControl,		PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
	MSG_MAP(ArmD3,				PPSMC_MSG_ArmD3,			0),
	MSG_MAP(SetNumBadMemoryPagesRetired,	PPSMC_MSG_SetNumBadMemoryPagesRetired,	0),
	MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
		PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel,			0),
	MSG_MAP(AllowIHHostInterrupt,		PPSMC_MSG_AllowIHHostInterrupt,		0),
	MSG_MAP(ReenableAcDcInterrupt,		PPSMC_MSG_ReenableAcDcInterrupt,	0),
};
139
/*
 * Generic SMU clock id -> ASIC PPCLK id mapping.
 * Note SCLK/GFXCLK and MCLK/UCLK are aliases of the same hardware clock.
 */
static struct cmn2asic_mapping smu_v14_0_2_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(GFXCLK,		PPCLK_GFXCLK),
	CLK_MAP(SCLK,		PPCLK_GFXCLK),
	CLK_MAP(SOCCLK,		PPCLK_SOCCLK),
	CLK_MAP(FCLK,		PPCLK_FCLK),
	CLK_MAP(UCLK,		PPCLK_UCLK),
	CLK_MAP(MCLK,		PPCLK_UCLK),
	CLK_MAP(VCLK,		PPCLK_VCLK_0),
	CLK_MAP(DCLK,		PPCLK_DCLK_0),
	CLK_MAP(DCEFCLK,	PPCLK_DCFCLK),
};
151
/*
 * Generic SMU feature bit -> ASIC feature bit mapping.
 * The explicit designated entries at the end alias several generic
 * features (VCLK/DCLK DPM, PPT) onto shared ASIC feature bits.
 */
static struct cmn2asic_mapping smu_v14_0_2_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(FW_DATA_READ),
	FEA_MAP(DPM_GFXCLK),
	FEA_MAP(DPM_GFX_POWER_OPTIMIZER),
	FEA_MAP(DPM_UCLK),
	FEA_MAP(DPM_FCLK),
	FEA_MAP(DPM_SOCCLK),
	FEA_MAP(DPM_LINK),
	FEA_MAP(DPM_DCN),
	FEA_MAP(VMEMP_SCALING),
	FEA_MAP(VDDIO_MEM_SCALING),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_DCFCLK),
	FEA_MAP(DS_UCLK),
	FEA_MAP(GFX_ULV),
	FEA_MAP(FW_DSTATE),
	FEA_MAP(GFXOFF),
	FEA_MAP(BACO),
	FEA_MAP(MM_DPM),
	FEA_MAP(SOC_MPCLK_DS),
	FEA_MAP(BACO_MPCLK_DS),
	FEA_MAP(THROTTLERS),
	FEA_MAP(SMARTSHIFT),
	FEA_MAP(GTHR),
	FEA_MAP(ACDC),
	FEA_MAP(VR0HOT),
	FEA_MAP(FW_CTF),
	FEA_MAP(FAN_CONTROL),
	FEA_MAP(GFX_DCS),
	FEA_MAP(GFX_READ_MARGIN),
	FEA_MAP(LED_DISPLAY),
	FEA_MAP(GFXCLK_SPREAD_SPECTRUM),
	FEA_MAP(OUT_OF_BAND_MONITOR),
	FEA_MAP(OPTIMIZED_VMIN),
	FEA_MAP(GFX_IMU),
	FEA_MAP(BOOT_TIME_CAL),
	FEA_MAP(GFX_PCC_DFLL),
	FEA_MAP(SOC_CG),
	FEA_MAP(DF_CSTATE),
	FEA_MAP(GFX_EDC),
	FEA_MAP(BOOT_POWER_OPT),
	FEA_MAP(CLOCK_POWER_DOWN_BYPASS),
	FEA_MAP(DS_VCN),
	FEA_MAP(BACO_CG),
	FEA_MAP(MEM_TEMP_READ),
	FEA_MAP(ATHUB_MMHUB_PG),
	FEA_MAP(SOC_PCC),
	FEA_MAP(EDC_PWRBRK),
	FEA_MAP(SOC_EDC_XVMIN),
	FEA_MAP(GFX_PSM_DIDT),
	FEA_MAP(APT_ALL_ENABLE),
	FEA_MAP(APT_SQ_THROTTLE),
	FEA_MAP(APT_PF_DCS),
	FEA_MAP(GFX_EDC_XVMIN),
	FEA_MAP(GFX_DIDT_XVMIN),
	FEA_MAP(FAN_ABNORMAL),
	/* VCLK/DCLK DPM are controlled by the single multimedia DPM bit */
	[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
	[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
	/* PPT limiting is part of the throttlers feature */
	[SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
};
215
/* Generic SMU table id -> ASIC table id mapping. */
static struct cmn2asic_mapping smu_v14_0_2_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PPTABLE),
	TAB_MAP(WATERMARKS),
	TAB_MAP(AVFS_PSM_DEBUG),
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(DRIVER_SMU_CONFIG),
	TAB_MAP(ACTIVITY_MONITOR_COEFF),
	[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
	TAB_MAP(I2C_COMMANDS),
	TAB_MAP(ECCINFO),
	TAB_MAP(OVERDRIVE),
};
229
/* Generic power source (AC/DC) -> ASIC power source mapping. */
static struct cmn2asic_mapping smu_v14_0_2_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
	PWR_MAP(AC),
	PWR_MAP(DC),
};
234
/* Power-profile mode -> PPLib workload bit mapping. */
static struct cmn2asic_mapping smu_v14_0_2_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT,	WORKLOAD_PPLIB_DEFAULT_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,		WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,		WORKLOAD_PPLIB_POWER_SAVING_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,		WORKLOAD_PPLIB_VIDEO_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,			WORKLOAD_PPLIB_VR_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_COMPUTE_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,		WORKLOAD_PPLIB_CUSTOM_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D,		WORKLOAD_PPLIB_WINDOW_3D_BIT),
};
245
/*
 * PMFW throttler bit -> generic SMU throttler bit translation, consumed
 * when converting the firmware throttler status for userspace reporting.
 */
static const uint8_t smu_v14_0_2_throttler_map[] = {
	[THROTTLER_PPT0_BIT]		= (SMU_THROTTLER_PPT0_BIT),
	[THROTTLER_PPT1_BIT]		= (SMU_THROTTLER_PPT1_BIT),
	[THROTTLER_PPT2_BIT]		= (SMU_THROTTLER_PPT2_BIT),
	[THROTTLER_PPT3_BIT]		= (SMU_THROTTLER_PPT3_BIT),
	[THROTTLER_TDC_GFX_BIT]		= (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_TDC_SOC_BIT]		= (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_TEMP_EDGE_BIT]	= (SMU_THROTTLER_TEMP_EDGE_BIT),
	[THROTTLER_TEMP_HOTSPOT_BIT]	= (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
	[THROTTLER_TEMP_MEM_BIT]	= (SMU_THROTTLER_TEMP_MEM_BIT),
	[THROTTLER_TEMP_VR_GFX_BIT]	= (SMU_THROTTLER_TEMP_VR_GFX_BIT),
	[THROTTLER_TEMP_VR_SOC_BIT]	= (SMU_THROTTLER_TEMP_VR_SOC_BIT),
	[THROTTLER_TEMP_VR_MEM0_BIT]	= (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
	[THROTTLER_TEMP_VR_MEM1_BIT]	= (SMU_THROTTLER_TEMP_VR_MEM1_BIT),
	[THROTTLER_TEMP_LIQUID0_BIT]	= (SMU_THROTTLER_TEMP_LIQUID0_BIT),
	[THROTTLER_TEMP_LIQUID1_BIT]	= (SMU_THROTTLER_TEMP_LIQUID1_BIT),
	[THROTTLER_GFX_APCC_PLUS_BIT]	= (SMU_THROTTLER_APCC_BIT),
	[THROTTLER_FIT_BIT]		= (SMU_THROTTLER_FIT_BIT),
};
265
/*
 * Build the mask of PMFW features the driver allows to be enabled.
 * @feature_mask: output array of @num 32-bit words; the code below
 *                treats the pair as one 64-bit mask.
 * @num: number of 32-bit words in @feature_mask; must be <= 2.
 * Returns 0 on success, -EINVAL when @num is out of range.
 */
static int
smu_v14_0_2_get_allowed_feature_mask(struct smu_context *smu,
				     uint32_t *feature_mask, uint32_t num)
{
	struct amdgpu_device *adev = smu->adev;
	/*u32 smu_version;*/

	if (num > 2)
		return -EINVAL;

	/* Start from "everything allowed"; the disabled #if 0 block below
	 * would strip features based on pp_feature flags. */
	memset(feature_mask, 0xff, sizeof(uint32_t) * num);

	/*
	 * NOTE(review): with the mask pre-filled with 0xff these ORs are
	 * currently no-ops; they matter only once the masking below is
	 * re-enabled.  The uint64_t casts alias the uint32_t array —
	 * assumes the two words are contiguous; confirm on the target ABI.
	 */
	if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) {
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT);
	}
	/* Feature stripping disabled (bring-up); kept for reference. */
#if 0
	if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) ||
	    !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);

	if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);

	/* PMFW 78.58 contains a critical fix for gfxoff feature */
	smu_cmn_get_smc_version(smu, NULL, &smu_version);
	if ((smu_version < 0x004e3a00) ||
	    !(adev->pm.pp_feature & PP_GFXOFF_MASK))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);

	if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) {
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
	}

	if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);

	if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT);
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT);
	}

	if (!(adev->pm.pp_feature & PP_ULV_MASK))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT);
#endif

	return 0;
}
316
smu_v14_0_2_check_powerplay_table(struct smu_context * smu)317 static int smu_v14_0_2_check_powerplay_table(struct smu_context *smu)
318 {
319 struct smu_table_context *table_context = &smu->smu_table;
320 struct smu_14_0_2_powerplay_table *powerplay_table =
321 table_context->power_play_table;
322 struct smu_baco_context *smu_baco = &smu->smu_baco;
323 PPTable_t *pptable = smu->smu_table.driver_pptable;
324 const OverDriveLimits_t * const overdrive_upperlimits =
325 &pptable->SkuTable.OverDriveLimitsBasicMax;
326 const OverDriveLimits_t * const overdrive_lowerlimits =
327 &pptable->SkuTable.OverDriveLimitsBasicMin;
328
329 if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_HARDWAREDC)
330 smu->dc_controlled_by_gpio = true;
331
332 if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_BACO) {
333 smu_baco->platform_support = true;
334
335 if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_MACO)
336 smu_baco->maco_support = true;
337 }
338
339 if (!overdrive_lowerlimits->FeatureCtrlMask ||
340 !overdrive_upperlimits->FeatureCtrlMask)
341 smu->od_enabled = false;
342
343 table_context->thermal_controller_type =
344 powerplay_table->thermal_controller_type;
345
346 /*
347 * Instead of having its own buffer space and get overdrive_table copied,
348 * smu->od_settings just points to the actual overdrive_table
349 */
350 smu->od_settings = &powerplay_table->overdrive_table;
351
352 smu->adev->pm.no_fan =
353 !(pptable->PFE_Settings.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT));
354
355 return 0;
356 }
357
smu_v14_0_2_store_powerplay_table(struct smu_context * smu)358 static int smu_v14_0_2_store_powerplay_table(struct smu_context *smu)
359 {
360 struct smu_table_context *table_context = &smu->smu_table;
361 struct smu_14_0_2_powerplay_table *powerplay_table =
362 table_context->power_play_table;
363
364 memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
365 sizeof(PPTable_t));
366
367 return 0;
368 }
369
/*
 * Fetch the combo pptable from the PMFW and hand back a pointer to the
 * cached copy along with the powerplay-table size.
 * @table: out — points at smu_table->combo_pptable on success.
 * @size:  out — size of struct smu_14_0_2_powerplay_table.
 * Returns 0 on success or the error from smu_cmn_get_combo_pptable().
 */
static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu,
					     void **table,
					     uint32_t *size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	int err;

	err = smu_cmn_get_combo_pptable(smu);
	if (err)
		return err;

	*table = smu_table->combo_pptable;
	*size = sizeof(struct smu_14_0_2_powerplay_table);

	return 0;
}
387
smu_v14_0_2_setup_pptable(struct smu_context * smu)388 static int smu_v14_0_2_setup_pptable(struct smu_context *smu)
389 {
390 struct smu_table_context *smu_table = &smu->smu_table;
391 int ret = 0;
392
393 if (amdgpu_sriov_vf(smu->adev))
394 return 0;
395
396 ret = smu_v14_0_2_get_pptable_from_pmfw(smu,
397 &smu_table->power_play_table,
398 &smu_table->power_play_table_size);
399 if (ret)
400 return ret;
401
402 ret = smu_v14_0_2_store_powerplay_table(smu);
403 if (ret)
404 return ret;
405
406 ret = smu_v14_0_2_check_powerplay_table(smu);
407 if (ret)
408 return ret;
409
410 return ret;
411 }
412
/*
 * Describe the SMU-shared VRAM tables and allocate the driver-side
 * (system memory) shadow buffers for metrics, gpu_metrics, watermarks
 * and ECC info.  Uses a goto ladder to unwind on allocation failure.
 * Returns 0 on success, -ENOMEM when any allocation fails.
 */
static int smu_v14_0_2_tables_init(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	/* VRAM-resident tables shared with the PMFW */
	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU14_TOOL_SIZE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
		       sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
		       AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	/* Driver-side shadow buffers */
	smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
	if (!smu_table->metrics_table)
		goto err0_out;
	/* force a refresh on the first metrics read */
	smu_table->metrics_time = 0;

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err1_out;

	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
	if (!smu_table->watermarks_table)
		goto err2_out;

	smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
	if (!smu_table->ecc_table)
		goto err3_out;

	return 0;

err3_out:
	kfree(smu_table->watermarks_table);
err2_out:
	kfree(smu_table->gpu_metrics_table);
err1_out:
	kfree(smu_table->metrics_table);
err0_out:
	return -ENOMEM;
}
467
smu_v14_0_2_allocate_dpm_context(struct smu_context * smu)468 static int smu_v14_0_2_allocate_dpm_context(struct smu_context *smu)
469 {
470 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
471
472 smu_dpm->dpm_context = kzalloc(sizeof(struct smu_14_0_dpm_context),
473 GFP_KERNEL);
474 if (!smu_dpm->dpm_context)
475 return -ENOMEM;
476
477 smu_dpm->dpm_context_size = sizeof(struct smu_14_0_dpm_context);
478
479 return 0;
480 }
481
/*
 * Initialize all SMC tables: ASIC-specific table descriptors and shadow
 * buffers, the DPM context, then the common smu_v14_0 table setup.
 * Returns 0 on success or the first failing step's error code.
 */
static int smu_v14_0_2_init_smc_tables(struct smu_context *smu)
{
	int err;

	err = smu_v14_0_2_tables_init(smu);
	if (err)
		return err;

	err = smu_v14_0_2_allocate_dpm_context(smu);
	if (err)
		return err;

	return smu_v14_0_init_smc_tables(smu);
}
496
smu_v14_0_2_set_default_dpm_table(struct smu_context * smu)497 static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
498 {
499 struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
500 struct smu_table_context *table_context = &smu->smu_table;
501 PPTable_t *pptable = table_context->driver_pptable;
502 SkuTable_t *skutable = &pptable->SkuTable;
503 struct smu_14_0_dpm_table *dpm_table;
504 struct smu_14_0_pcie_table *pcie_table;
505 uint32_t link_level;
506 int ret = 0;
507
508 /* socclk dpm table setup */
509 dpm_table = &dpm_context->dpm_tables.soc_table;
510 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
511 ret = smu_v14_0_set_single_dpm_table(smu,
512 SMU_SOCCLK,
513 dpm_table);
514 if (ret)
515 return ret;
516 } else {
517 dpm_table->count = 1;
518 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
519 dpm_table->dpm_levels[0].enabled = true;
520 dpm_table->min = dpm_table->dpm_levels[0].value;
521 dpm_table->max = dpm_table->dpm_levels[0].value;
522 }
523
524 /* gfxclk dpm table setup */
525 dpm_table = &dpm_context->dpm_tables.gfx_table;
526 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
527 ret = smu_v14_0_set_single_dpm_table(smu,
528 SMU_GFXCLK,
529 dpm_table);
530 if (ret)
531 return ret;
532
533 /*
534 * Update the reported maximum shader clock to the value
535 * which can be guarded to be achieved on all cards. This
536 * is aligned with Window setting. And considering that value
537 * might be not the peak frequency the card can achieve, it
538 * is normal some real-time clock frequency can overtake this
539 * labelled maximum clock frequency(for example in pp_dpm_sclk
540 * sysfs output).
541 */
542 if (skutable->DriverReportedClocks.GameClockAc &&
543 (dpm_table->dpm_levels[dpm_table->count - 1].value >
544 skutable->DriverReportedClocks.GameClockAc)) {
545 dpm_table->dpm_levels[dpm_table->count - 1].value =
546 skutable->DriverReportedClocks.GameClockAc;
547 dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
548 }
549 } else {
550 dpm_table->count = 1;
551 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
552 dpm_table->dpm_levels[0].enabled = true;
553 dpm_table->min = dpm_table->dpm_levels[0].value;
554 dpm_table->max = dpm_table->dpm_levels[0].value;
555 }
556
557 /* uclk dpm table setup */
558 dpm_table = &dpm_context->dpm_tables.uclk_table;
559 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
560 ret = smu_v14_0_set_single_dpm_table(smu,
561 SMU_UCLK,
562 dpm_table);
563 if (ret)
564 return ret;
565 } else {
566 dpm_table->count = 1;
567 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
568 dpm_table->dpm_levels[0].enabled = true;
569 dpm_table->min = dpm_table->dpm_levels[0].value;
570 dpm_table->max = dpm_table->dpm_levels[0].value;
571 }
572
573 /* fclk dpm table setup */
574 dpm_table = &dpm_context->dpm_tables.fclk_table;
575 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
576 ret = smu_v14_0_set_single_dpm_table(smu,
577 SMU_FCLK,
578 dpm_table);
579 if (ret)
580 return ret;
581 } else {
582 dpm_table->count = 1;
583 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
584 dpm_table->dpm_levels[0].enabled = true;
585 dpm_table->min = dpm_table->dpm_levels[0].value;
586 dpm_table->max = dpm_table->dpm_levels[0].value;
587 }
588
589 /* vclk dpm table setup */
590 dpm_table = &dpm_context->dpm_tables.vclk_table;
591 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) {
592 ret = smu_v14_0_set_single_dpm_table(smu,
593 SMU_VCLK,
594 dpm_table);
595 if (ret)
596 return ret;
597 } else {
598 dpm_table->count = 1;
599 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
600 dpm_table->dpm_levels[0].enabled = true;
601 dpm_table->min = dpm_table->dpm_levels[0].value;
602 dpm_table->max = dpm_table->dpm_levels[0].value;
603 }
604
605 /* dclk dpm table setup */
606 dpm_table = &dpm_context->dpm_tables.dclk_table;
607 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) {
608 ret = smu_v14_0_set_single_dpm_table(smu,
609 SMU_DCLK,
610 dpm_table);
611 if (ret)
612 return ret;
613 } else {
614 dpm_table->count = 1;
615 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
616 dpm_table->dpm_levels[0].enabled = true;
617 dpm_table->min = dpm_table->dpm_levels[0].value;
618 dpm_table->max = dpm_table->dpm_levels[0].value;
619 }
620
621 /* lclk dpm table setup */
622 pcie_table = &dpm_context->dpm_tables.pcie_table;
623 pcie_table->num_of_link_levels = 0;
624 for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
625 if (!skutable->PcieGenSpeed[link_level] &&
626 !skutable->PcieLaneCount[link_level] &&
627 !skutable->LclkFreq[link_level])
628 continue;
629
630 pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
631 skutable->PcieGenSpeed[link_level];
632 pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
633 skutable->PcieLaneCount[link_level];
634 pcie_table->clk_freq[pcie_table->num_of_link_levels] =
635 skutable->LclkFreq[link_level];
636 pcie_table->num_of_link_levels++;
637
638 if (link_level == 0)
639 link_level++;
640 }
641
642 /* dcefclk dpm table setup */
643 dpm_table = &dpm_context->dpm_tables.dcef_table;
644 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
645 ret = smu_v14_0_set_single_dpm_table(smu,
646 SMU_DCEFCLK,
647 dpm_table);
648 if (ret)
649 return ret;
650 } else {
651 dpm_table->count = 1;
652 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
653 dpm_table->dpm_levels[0].enabled = true;
654 dpm_table->min = dpm_table->dpm_levels[0].value;
655 dpm_table->max = dpm_table->dpm_levels[0].value;
656 }
657
658 return 0;
659 }
660
smu_v14_0_2_is_dpm_running(struct smu_context * smu)661 static bool smu_v14_0_2_is_dpm_running(struct smu_context *smu)
662 {
663 int ret = 0;
664 uint64_t feature_enabled;
665
666 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
667 if (ret)
668 return false;
669
670 return !!(feature_enabled & SMC_DPM_FEATURE);
671 }
672
smu_v14_0_2_get_throttler_status(SmuMetrics_t * metrics)673 static uint32_t smu_v14_0_2_get_throttler_status(SmuMetrics_t *metrics)
674 {
675 uint32_t throttler_status = 0;
676 int i;
677
678 for (i = 0; i < THROTTLER_COUNT; i++)
679 throttler_status |=
680 (metrics->ThrottlingPercentage[i] ? 1U << i : 0);
681
682 return throttler_status;
683 }
684
/* Activity (%) at or below which the post-deep-sleep frequency is reported */
#define SMU_14_0_2_BUSY_THRESHOLD	5
/*
 * Read one metric from the (cached) PMFW metrics table.
 * @member: which metric to fetch.
 * @value:  out — the raw metric value; units depend on the member
 *          (clocks, activity %, scaled temperature, etc.).
 * Returns 0 on success or the error from the metrics-table refresh;
 * unknown members yield UINT_MAX with a 0 return.
 */
static int smu_v14_0_2_get_smu_metrics_data(struct smu_context *smu,
					    MetricsMember_t member,
					    uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics =
		&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
	int ret = 0;

	/* refresh the cached metrics table if it is stale */
	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK_0];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK_0];
		break;
	case METRICS_CURR_FCLK:
		*value = metrics->CurrClock[PPCLK_FCLK];
		break;
	case METRICS_CURR_DCEFCLK:
		*value = metrics->CurrClock[PPCLK_DCFCLK];
		break;
	/*
	 * For the averaged clocks, report the post-deep-sleep frequency
	 * when the block is (nearly) idle, the pre-deep-sleep one otherwise.
	 */
	case METRICS_AVERAGE_GFXCLK:
		if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD)
			*value = metrics->AverageGfxclkFrequencyPostDs;
		else
			*value = metrics->AverageGfxclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_FCLK:
		if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
			*value = metrics->AverageFclkFrequencyPostDs;
		else
			*value = metrics->AverageFclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_UCLK:
		if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
			*value = metrics->AverageMemclkFrequencyPostDs;
		else
			*value = metrics->AverageMemclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->AverageVclk0Frequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->AverageDclk0Frequency;
		break;
	case METRICS_AVERAGE_VCLK1:
		*value = metrics->AverageVclk1Frequency;
		break;
	case METRICS_AVERAGE_DCLK1:
		*value = metrics->AverageDclk1Frequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* <<8 scaling — presumably 8.8 fixed point expected by the
		 * smu core; confirm against the consumer. */
		*value = metrics->AverageSocketPower << 8;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->AvgTemperature[TEMP_EDGE] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->AvgTemperature[TEMP_HOTSPOT] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->AvgTemperature[TEMP_MEM] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->AvgTemperature[TEMP_VR_GFX] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->AvgTemperature[TEMP_VR_SOC] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = smu_v14_0_2_get_throttler_status(metrics);
		break;
	case METRICS_CURR_FANSPEED:
		*value = metrics->AvgFanRpm;
		break;
	case METRICS_CURR_FANPWM:
		*value = metrics->AvgFanPwm;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		*value = metrics->AvgVoltage[SVI_PLANE_VDD_GFX];
		break;
	case METRICS_PCIE_RATE:
		*value = metrics->PcieRate;
		break;
	case METRICS_PCIE_WIDTH:
		*value = metrics->PcieWidth;
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}
807
/*
 * Report the min/max frequency bounds for a clock from the cached DPM
 * tables.  Either @min or @max may be NULL when the caller only needs
 * one of the bounds.
 * Returns 0 on success, -EINVAL for an unsupported clock type.
 */
static int smu_v14_0_2_get_dpm_ultimate_freq(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     uint32_t *min,
					     uint32_t *max)
{
	struct smu_14_0_dpm_context *ctx = smu->smu_dpm.dpm_context;
	struct smu_14_0_dpm_table *table;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		table = &ctx->dpm_tables.uclk_table;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		table = &ctx->dpm_tables.gfx_table;
		break;
	case SMU_SOCCLK:
		table = &ctx->dpm_tables.soc_table;
		break;
	case SMU_FCLK:
		table = &ctx->dpm_tables.fclk_table;
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		table = &ctx->dpm_tables.vclk_table;
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		table = &ctx->dpm_tables.dclk_table;
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported clock type!\n");
		return -EINVAL;
	}

	if (min)
		*min = table->min;
	if (max)
		*max = table->max;

	return 0;
}
858
/*
 * amd_pp sensor read entry point.  MAX_FAN_RPM comes straight from the
 * pptable; everything else maps to a metrics member and funnels through
 * smu_v14_0_2_get_smu_metrics_data().  Clock sensors are scaled by 100
 * (10KHz units -> 10Hz-style units, matching the original behavior).
 * @size is set to 4 for every supported sensor.
 * Returns 0 on success, -EOPNOTSUPP for unknown sensors, or the error
 * from the metrics fetch.
 */
static int smu_v14_0_2_read_sensor(struct smu_context *smu,
				   enum amd_pp_sensors sensor,
				   void *data,
				   uint32_t *size)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	uint32_t *out = (uint32_t *)data;
	MetricsMember_t member;
	bool scale_by_100 = false;
	int ret;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		/* static limit from the pptable, not a live metric */
		*(uint16_t *)data = smc_pptable->CustomSkuTable.FanMaximumRpm;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		member = METRICS_AVERAGE_MEMACTIVITY;
		break;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		member = METRICS_AVERAGE_GFXACTIVITY;
		break;
	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
		member = METRICS_AVERAGE_SOCKETPOWER;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		member = METRICS_TEMPERATURE_HOTSPOT;
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		member = METRICS_TEMPERATURE_EDGE;
		break;
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		member = METRICS_TEMPERATURE_MEM;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		member = METRICS_CURR_UCLK;
		scale_by_100 = true;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		member = METRICS_AVERAGE_GFXCLK;
		scale_by_100 = true;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		member = METRICS_VOLTAGE_VDDGFX;
		break;
	default:
		return -EOPNOTSUPP;
	}

	ret = smu_v14_0_2_get_smu_metrics_data(smu, member, out);
	if (scale_by_100)
		*out *= 100;
	*size = 4;

	return ret;
}
936
/*
 * Fetch the current frequency of @clk_type from the SMU metrics table.
 *
 * The generic clock type is first translated into the ASIC-specific
 * PPCLK id, which then selects the metrics member to read (averaged
 * for gfx/vclk/dclk, instantaneous for the others).
 *
 * Returns 0 on success, -EINVAL for unmapped clocks, or the error from
 * the metrics query.
 */
static int smu_v14_0_2_get_current_clk_freq_by_table(struct smu_context *smu,
						     enum smu_clk_type clk_type,
						     uint32_t *value)
{
	static const struct {
		int clk_id;
		MetricsMember_t member;
	} clk_map[] = {
		{ PPCLK_GFXCLK, METRICS_AVERAGE_GFXCLK },
		{ PPCLK_UCLK,   METRICS_CURR_UCLK },
		{ PPCLK_FCLK,   METRICS_CURR_FCLK },
		{ PPCLK_SOCCLK, METRICS_CURR_SOCCLK },
		{ PPCLK_VCLK_0, METRICS_AVERAGE_VCLK },
		{ PPCLK_DCLK_0, METRICS_AVERAGE_DCLK },
		{ PPCLK_DCFCLK, METRICS_CURR_DCEFCLK },
	};
	int clk_id;
	int i;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(clk_map); i++) {
		if (clk_map[i].clk_id == clk_id)
			return smu_v14_0_2_get_smu_metrics_data(smu,
								clk_map[i].member,
								value);
	}

	return -EINVAL;
}
980
smu_v14_0_2_is_od_feature_supported(struct smu_context * smu,int od_feature_bit)981 static bool smu_v14_0_2_is_od_feature_supported(struct smu_context *smu,
982 int od_feature_bit)
983 {
984 PPTable_t *pptable = smu->smu_table.driver_pptable;
985 const OverDriveLimits_t * const overdrive_upperlimits =
986 &pptable->SkuTable.OverDriveLimitsBasicMax;
987
988 return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
989 }
990
/*
 * Look up the [min, max] tunable range for one overdrive setting.
 *
 * The bounds come from the OverDriveLimitsBasicMin/Max structures in
 * the pptable.  Unknown settings yield INT_MAX for both bounds, which
 * matches the original behavior.  Either output pointer may be NULL
 * when the caller needs only one bound.
 */
static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu,
					      int od_feature_bit,
					      int32_t *min,
					      int32_t *max)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	const OverDriveLimits_t * const upper =
		&pptable->SkuTable.OverDriveLimitsBasicMax;
	const OverDriveLimits_t * const lower =
		&pptable->SkuTable.OverDriveLimitsBasicMin;
	int32_t limit_min = INT_MAX;
	int32_t limit_max = INT_MAX;

	switch (od_feature_bit) {
	case PP_OD_FEATURE_GFXCLK_FMIN:
	case PP_OD_FEATURE_GFXCLK_FMAX:
		/* gfxclk is offset-based: both bounds share one field */
		limit_min = lower->GfxclkFoffset;
		limit_max = upper->GfxclkFoffset;
		break;
	case PP_OD_FEATURE_UCLK_FMIN:
		limit_min = lower->UclkFmin;
		limit_max = upper->UclkFmin;
		break;
	case PP_OD_FEATURE_UCLK_FMAX:
		limit_min = lower->UclkFmax;
		limit_max = upper->UclkFmax;
		break;
	case PP_OD_FEATURE_GFX_VF_CURVE:
		limit_min = lower->VoltageOffsetPerZoneBoundary[0];
		limit_max = upper->VoltageOffsetPerZoneBoundary[0];
		break;
	case PP_OD_FEATURE_FAN_CURVE_TEMP:
		limit_min = lower->FanLinearTempPoints[0];
		limit_max = upper->FanLinearTempPoints[0];
		break;
	case PP_OD_FEATURE_FAN_CURVE_PWM:
		limit_min = lower->FanLinearPwmPoints[0];
		limit_max = upper->FanLinearPwmPoints[0];
		break;
	case PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT:
		limit_min = lower->AcousticLimitRpmThreshold;
		limit_max = upper->AcousticLimitRpmThreshold;
		break;
	case PP_OD_FEATURE_FAN_ACOUSTIC_TARGET:
		limit_min = lower->AcousticTargetRpmThreshold;
		limit_max = upper->AcousticTargetRpmThreshold;
		break;
	case PP_OD_FEATURE_FAN_TARGET_TEMPERATURE:
		limit_min = lower->FanTargetTemperature;
		limit_max = upper->FanTargetTemperature;
		break;
	case PP_OD_FEATURE_FAN_MINIMUM_PWM:
		limit_min = lower->FanMinimumPwm;
		limit_max = upper->FanMinimumPwm;
		break;
	default:
		break;
	}

	if (min)
		*min = limit_min;
	if (max)
		*max = limit_max;
}
1055
/*
 * Emit the human-readable DPM level listing for @clk_type into the
 * sysfs buffer @buf (pp_dpm_* / pp_od_clk_voltage style output).
 *
 * Plain clock domains print one line per DPM level with '*' marking
 * the level matching the current frequency; fine-grained domains are
 * presented as two or three faked levels.  SMU_PCIE prints gen speed,
 * lane width and frequency per link level.  The SMU_OD_* types print
 * the current overdrive settings and, where applicable, their valid
 * ranges (queried via smu_v14_0_2_get_od_setting_limits).
 *
 * Returns the number of bytes written to @buf, or a negative errno
 * when a metrics query fails.
 */
static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					char *buf)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	OverDriveTableExternal_t *od_table =
		(OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
	struct smu_14_0_dpm_table *single_dpm_table;
	struct smu_14_0_pcie_table *pcie_table;
	uint32_t gen_speed, lane_width;
	int i, curr_freq, size = 0;
	int32_t min_value, max_value;
	int ret = 0;

	smu_cmn_get_sysfs_buf(&buf, &size);

	/* After a fatal RAS interrupt the hardware state is untrustworthy */
	if (amdgpu_ras_intr_triggered()) {
		size += sysfs_emit_at(buf, size, "unavailable\n");
		return size;
	}

	/*
	 * First pass: resolve the DPM table for plain clock domains.
	 * single_dpm_table is left unset for non-DPM clock types; the
	 * second switch only dereferences it for the cases set here.
	 */
	switch (clk_type) {
	case SMU_SCLK:
		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		break;
	case SMU_MCLK:
		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		break;
	case SMU_SOCCLK:
		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		break;
	case SMU_FCLK:
		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		break;
	case SMU_DCEFCLK:
		single_dpm_table = &(dpm_context->dpm_tables.dcef_table);
		break;
	default:
		break;
	}

	switch (clk_type) {
	case SMU_SCLK:
	case SMU_MCLK:
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_VCLK1:
	case SMU_DCLK:
	case SMU_DCLK1:
	case SMU_DCEFCLK:
		ret = smu_v14_0_2_get_current_clk_freq_by_table(smu, clk_type, &curr_freq);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to get current clock freq!");
			return ret;
		}

		if (single_dpm_table->is_fine_grained) {
			/*
			 * For fine grained dpms, there are only two dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> max clock freq
			 * And the current clock frequency can be any value between them.
			 * So, if the current clock frequency is not at level 0 or level 1,
			 * we will fake it as three dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> current actual clock freq
			 *   - level 2 -> max clock freq
			 */
			if ((single_dpm_table->dpm_levels[0].value != curr_freq) &&
			    (single_dpm_table->dpm_levels[1].value != curr_freq)) {
				size += sysfs_emit_at(buf, size, "0: %uMhz\n",
						      single_dpm_table->dpm_levels[0].value);
				size += sysfs_emit_at(buf, size, "1: %uMhz *\n",
						      curr_freq);
				size += sysfs_emit_at(buf, size, "2: %uMhz\n",
						      single_dpm_table->dpm_levels[1].value);
			} else {
				size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
						      single_dpm_table->dpm_levels[0].value,
						      single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : "");
				size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
						      single_dpm_table->dpm_levels[1].value,
						      single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : "");
			}
		} else {
			/* Discrete DPM: print every level, star the active one */
			for (i = 0; i < single_dpm_table->count; i++)
				size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
						      i, single_dpm_table->dpm_levels[i].value,
						      single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : "");
		}
		break;
	case SMU_PCIE:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_PCIE_RATE,
						       &gen_speed);
		if (ret)
			return ret;

		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_PCIE_WIDTH,
						       &lane_width);
		if (ret)
			return ret;

		/* One line per link level: "<idx>: <gen>GT/s, x<lanes> <freq>Mhz [*]" */
		pcie_table = &(dpm_context->dpm_tables.pcie_table);
		for (i = 0; i < pcie_table->num_of_link_levels; i++)
			size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
					(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," :
					(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," :
					(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," :
					(pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," :
					(pcie_table->pcie_gen[i] == 4) ? "32.0GT/s," : "",
					(pcie_table->pcie_lane[i] == 1) ? "x1" :
					(pcie_table->pcie_lane[i] == 2) ? "x2" :
					(pcie_table->pcie_lane[i] == 3) ? "x4" :
					(pcie_table->pcie_lane[i] == 4) ? "x8" :
					(pcie_table->pcie_lane[i] == 5) ? "x12" :
					(pcie_table->pcie_lane[i] == 6) ? "x16" :
					(pcie_table->pcie_lane[i] == 7) ? "x32" : "",
					pcie_table->clk_freq[i],
					(gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
					(lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
					"*" : "");
		break;

	case SMU_OD_SCLK:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_GFXCLK_BIT))
			break;

		/* gfxclk overdrive is a signed offset, not an absolute freq */
		size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n");
		size += sysfs_emit_at(buf, size, "%dMhz\n",
					od_table->OverDriveTable.GfxclkFoffset);
		break;

	case SMU_OD_MCLK:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_UCLK_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_MCLK:\n");
		size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMHz\n",
					od_table->OverDriveTable.UclkFmin,
					od_table->OverDriveTable.UclkFmax);
		break;

	case SMU_OD_VDDGFX_OFFSET:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_GFX_VF_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n");
		size += sysfs_emit_at(buf, size, "%dmV\n",
				      od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[0]);
		break;

	case SMU_OD_FAN_CURVE:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_FAN_CURVE:\n");
		/* NOTE(review): prints NUM_OD_FAN_MAX_POINTS - 1 points — confirm
		 * the last point is intentionally reserved/excluded. */
		for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++)
			size += sysfs_emit_at(buf, size, "%d: %dC %d%%\n",
						i,
						(int)od_table->OverDriveTable.FanLinearTempPoints[i],
						(int)od_table->OverDriveTable.FanLinearPwmPoints[i]);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_CURVE_TEMP,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "FAN_CURVE(hotspot temp): %uC %uC\n",
				      min_value, max_value);

		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_CURVE_PWM,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "FAN_CURVE(fan speed): %u%% %u%%\n",
				      min_value, max_value);

		break;

	case SMU_OD_ACOUSTIC_LIMIT:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_LIMIT:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
					(int)od_table->OverDriveTable.AcousticLimitRpmThreshold);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "ACOUSTIC_LIMIT: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_ACOUSTIC_TARGET:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_TARGET:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
					(int)od_table->OverDriveTable.AcousticTargetRpmThreshold);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "ACOUSTIC_TARGET: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_FAN_TARGET_TEMPERATURE:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "FAN_TARGET_TEMPERATURE:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
					(int)od_table->OverDriveTable.FanTargetTemperature);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "TARGET_TEMPERATURE: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_FAN_MINIMUM_PWM:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "FAN_MINIMUM_PWM:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
					(int)od_table->OverDriveTable.FanMinimumPwm);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_MINIMUM_PWM,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "MINIMUM_PWM: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_RANGE:
		if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) &&
		    !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) &&
		    !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");

		if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
			smu_v14_0_2_get_od_setting_limits(smu,
							  PP_OD_FEATURE_GFXCLK_FMAX,
							  &min_value,
							  &max_value);
			size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n",
					      min_value, max_value);
		}

		if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
			/* min comes from UCLK_FMIN limits, max from UCLK_FMAX limits */
			smu_v14_0_2_get_od_setting_limits(smu,
							  PP_OD_FEATURE_UCLK_FMIN,
							  &min_value,
							  NULL);
			smu_v14_0_2_get_od_setting_limits(smu,
							  PP_OD_FEATURE_UCLK_FMAX,
							  NULL,
							  &max_value);
			size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
					      min_value, max_value);
		}

		if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
			smu_v14_0_2_get_od_setting_limits(smu,
							  PP_OD_FEATURE_GFX_VF_CURVE,
							  &min_value,
							  &max_value);
			size += sysfs_emit_at(buf, size, "VDDGFX_OFFSET: %7dmv %10dmv\n",
					      min_value, max_value);
		}
		break;

	default:
		break;
	}

	return size;
}
1369
/*
 * Restrict a DPM clock domain to the levels selected in @mask.
 *
 * The lowest and highest set bits of @mask pick the soft min/max DPM
 * levels; their frequencies are then programmed as a soft limit range.
 * Fine-grained domains only expose two levels, so the selection is
 * clamped to {0, 1} there.  Clock types without a forcible DPM table
 * (DCEFCLK, PCIE, ...) are silently accepted and ignored, as before.
 *
 * Returns 0 on success (or no-op), -EINVAL for an out-of-range level,
 * or the error from programming the limit range.
 */
static int smu_v14_0_2_force_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					uint32_t mask)
{
	struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_14_0_dpm_table *table;
	uint32_t level_lo, level_hi;
	uint32_t min_freq, max_freq;

	level_lo = mask ? (ffs(mask) - 1) : 0;
	level_hi = mask ? (fls(mask) - 1) : 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		table = &dpm_context->dpm_tables.gfx_table;
		break;
	case SMU_MCLK:
	case SMU_UCLK:
		table = &dpm_context->dpm_tables.uclk_table;
		break;
	case SMU_SOCCLK:
		table = &dpm_context->dpm_tables.soc_table;
		break;
	case SMU_FCLK:
		table = &dpm_context->dpm_tables.fclk_table;
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		table = &dpm_context->dpm_tables.vclk_table;
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		table = &dpm_context->dpm_tables.dclk_table;
		break;
	default:
		/* No forcible DPM table for this clock type */
		return 0;
	}

	if (table->is_fine_grained) {
		/* There is only 2 levels for fine grained DPM */
		level_lo = min(level_lo, 1U);
		level_hi = min(level_hi, 1U);
	} else if ((level_lo >= table->count) ||
		   (level_hi >= table->count)) {
		return -EINVAL;
	}

	min_freq = table->dpm_levels[level_lo].value;
	max_freq = table->dpm_levels[level_hi].value;

	return smu_v14_0_set_soft_freq_limited_range(smu,
						     clk_type,
						     min_freq,
						     max_freq,
						     false);
}
1449
/*
 * Clamp the PCIe DPM table to the platform capabilities and push the
 * resulting gen/width settings to the PMFW.
 *
 * When PCIe DPM is enabled in pp_feature, each link level is capped
 * individually; otherwise every level is forced to one common setting
 * (the caps, themselves capped by the table's top level).
 *
 * Returns 0 on success or the SMU message error.
 */
static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu,
					      uint8_t pcie_gen_cap,
					      uint8_t pcie_width_cap)
{
	struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_14_0_pcie_table *pcie_table =
		&dpm_context->dpm_tables.pcie_table;
	int levels = pcie_table->num_of_link_levels;
	uint32_t arg;
	int i, ret;

	if (!levels)
		return 0;

	if (smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK) {
		/* PCIe DPM on: cap each level independently */
		for (i = 0; i < levels; i++) {
			if (pcie_table->pcie_gen[i] > pcie_gen_cap)
				pcie_table->pcie_gen[i] = pcie_gen_cap;
			if (pcie_table->pcie_lane[i] > pcie_width_cap)
				pcie_table->pcie_lane[i] = pcie_width_cap;
		}
	} else {
		/* PCIe DPM off: never exceed what the table's top level offers */
		if (pcie_table->pcie_gen[levels - 1] < pcie_gen_cap)
			pcie_gen_cap = pcie_table->pcie_gen[levels - 1];

		if (pcie_table->pcie_lane[levels - 1] < pcie_width_cap)
			pcie_width_cap = pcie_table->pcie_lane[levels - 1];

		/* Force all levels to use the same settings */
		for (i = 0; i < levels; i++) {
			pcie_table->pcie_gen[i] = pcie_gen_cap;
			pcie_table->pcie_lane[i] = pcie_width_cap;
		}
	}

	/* Message payload: [23:16] level, [15:8] gen, [7:0] lanes */
	for (i = 0; i < levels; i++) {
		arg = (i << 16) |
		      (pcie_table->pcie_gen[i] << 8) |
		      pcie_table->pcie_lane[i];

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_OverridePcieParameters,
						      arg,
						      NULL);
		if (ret)
			return ret;
	}

	return 0;
}
1500
/*
 * Default thermal ranges in millidegrees Celsius.  Entry 0 (wide-open
 * defaults: absolute-zero minimums, 99C limits) is the template copied
 * by smu_v14_0_2_get_thermal_temperature_range() before the pptable
 * limits are applied on top.
 * NOTE(review): entry 1 (flat 120C) is not referenced in this chunk —
 * confirm its user elsewhere in the file.
 */
static const struct smu_temperature_range smu14_thermal_policy[] = {
	{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
	{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};
1505
smu_v14_0_2_get_thermal_temperature_range(struct smu_context * smu,struct smu_temperature_range * range)1506 static int smu_v14_0_2_get_thermal_temperature_range(struct smu_context *smu,
1507 struct smu_temperature_range *range)
1508 {
1509 struct smu_table_context *table_context = &smu->smu_table;
1510 struct smu_14_0_2_powerplay_table *powerplay_table =
1511 table_context->power_play_table;
1512 PPTable_t *pptable = smu->smu_table.driver_pptable;
1513
1514 if (amdgpu_sriov_vf(smu->adev))
1515 return 0;
1516
1517 if (!range)
1518 return -EINVAL;
1519
1520 memcpy(range, &smu14_thermal_policy[0], sizeof(struct smu_temperature_range));
1521
1522 range->max = pptable->CustomSkuTable.TemperatureLimit[TEMP_EDGE] *
1523 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1524 range->edge_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE) *
1525 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1526 range->hotspot_crit_max = pptable->CustomSkuTable.TemperatureLimit[TEMP_HOTSPOT] *
1527 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1528 range->hotspot_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT) *
1529 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1530 range->mem_crit_max = pptable->CustomSkuTable.TemperatureLimit[TEMP_MEM] *
1531 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1532 range->mem_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)*
1533 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1534 range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
1535 range->software_shutdown_temp_offset = pptable->CustomSkuTable.FanAbnormalTempLimitOffset;
1536
1537 return 0;
1538 }
1539
smu_v14_0_2_populate_umd_state_clk(struct smu_context * smu)1540 static int smu_v14_0_2_populate_umd_state_clk(struct smu_context *smu)
1541 {
1542 struct smu_14_0_dpm_context *dpm_context =
1543 smu->smu_dpm.dpm_context;
1544 struct smu_14_0_dpm_table *gfx_table =
1545 &dpm_context->dpm_tables.gfx_table;
1546 struct smu_14_0_dpm_table *mem_table =
1547 &dpm_context->dpm_tables.uclk_table;
1548 struct smu_14_0_dpm_table *soc_table =
1549 &dpm_context->dpm_tables.soc_table;
1550 struct smu_14_0_dpm_table *vclk_table =
1551 &dpm_context->dpm_tables.vclk_table;
1552 struct smu_14_0_dpm_table *dclk_table =
1553 &dpm_context->dpm_tables.dclk_table;
1554 struct smu_14_0_dpm_table *fclk_table =
1555 &dpm_context->dpm_tables.fclk_table;
1556 struct smu_umd_pstate_table *pstate_table =
1557 &smu->pstate_table;
1558 struct smu_table_context *table_context = &smu->smu_table;
1559 PPTable_t *pptable = table_context->driver_pptable;
1560 DriverReportedClocks_t driver_clocks =
1561 pptable->SkuTable.DriverReportedClocks;
1562
1563 pstate_table->gfxclk_pstate.min = gfx_table->min;
1564 if (driver_clocks.GameClockAc &&
1565 (driver_clocks.GameClockAc < gfx_table->max))
1566 pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
1567 else
1568 pstate_table->gfxclk_pstate.peak = gfx_table->max;
1569
1570 pstate_table->uclk_pstate.min = mem_table->min;
1571 pstate_table->uclk_pstate.peak = mem_table->max;
1572
1573 pstate_table->socclk_pstate.min = soc_table->min;
1574 pstate_table->socclk_pstate.peak = soc_table->max;
1575
1576 pstate_table->vclk_pstate.min = vclk_table->min;
1577 pstate_table->vclk_pstate.peak = vclk_table->max;
1578
1579 pstate_table->dclk_pstate.min = dclk_table->min;
1580 pstate_table->dclk_pstate.peak = dclk_table->max;
1581
1582 pstate_table->fclk_pstate.min = fclk_table->min;
1583 pstate_table->fclk_pstate.peak = fclk_table->max;
1584
1585 if (driver_clocks.BaseClockAc &&
1586 driver_clocks.BaseClockAc < gfx_table->max)
1587 pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
1588 else
1589 pstate_table->gfxclk_pstate.standard = gfx_table->max;
1590 pstate_table->uclk_pstate.standard = mem_table->max;
1591 pstate_table->socclk_pstate.standard = soc_table->min;
1592 pstate_table->vclk_pstate.standard = vclk_table->min;
1593 pstate_table->dclk_pstate.standard = dclk_table->min;
1594 pstate_table->fclk_pstate.standard = fclk_table->min;
1595
1596 return 0;
1597 }
1598
smu_v14_0_2_get_unique_id(struct smu_context * smu)1599 static void smu_v14_0_2_get_unique_id(struct smu_context *smu)
1600 {
1601 struct smu_table_context *smu_table = &smu->smu_table;
1602 SmuMetrics_t *metrics =
1603 &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
1604 struct amdgpu_device *adev = smu->adev;
1605 uint32_t upper32 = 0, lower32 = 0;
1606 int ret;
1607
1608 ret = smu_cmn_get_metrics_table(smu, NULL, false);
1609 if (ret)
1610 goto out;
1611
1612 upper32 = metrics->PublicSerialNumberUpper;
1613 lower32 = metrics->PublicSerialNumberLower;
1614
1615 out:
1616 adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
1617 }
1618
/*
 * Read the current fan PWM duty cycle.
 *
 * The PMFW reports the value in percent; it is rescaled into the
 * hwmon-style 0-255 pwm range before being returned in @speed.
 *
 * Returns 0 on success, -EINVAL for a NULL @speed, or the metrics
 * error.
 */
static int smu_v14_0_2_get_fan_speed_pwm(struct smu_context *smu,
					 uint32_t *speed)
{
	int ret;

	if (!speed)
		return -EINVAL;

	ret = smu_v14_0_2_get_smu_metrics_data(smu,
					       METRICS_CURR_FANPWM,
					       speed);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!");
		return ret;
	}

	/* Convert the PMFW output which is in percent to pwm(255) based */
	*speed = *speed * 255 / 100;
	if (*speed > 255)
		*speed = 255;

	return 0;
}
1640
/*
 * Read the current fan speed in RPM straight from the metrics table.
 * Returns 0 on success, -EINVAL for a NULL @speed, or the metrics
 * error.
 */
static int smu_v14_0_2_get_fan_speed_rpm(struct smu_context *smu,
					 uint32_t *speed)
{
	int ret = -EINVAL;

	if (speed)
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_CURR_FANSPEED,
						       speed);

	return ret;
}
1651
/*
 * Report the current, default, maximum and minimum socket power
 * limits.  Any output pointer may be NULL when the caller does not
 * need that value.
 *
 * The current limit is queried from the PMFW; if that fails it falls
 * back to the pptable AC or DC limit depending on the present power
 * source.  The maximum is the PPT0 AC message limit; the minimum is 0.
 *
 * Always returns 0.
 */
static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
				       uint32_t *current_power_limit,
				       uint32_t *default_power_limit,
				       uint32_t *max_power_limit,
				       uint32_t *min_power_limit)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	CustomSkuTable_t *skutable = &pptable->CustomSkuTable;
	uint32_t limit;

	if (smu_v14_0_get_current_power_limit(smu, &limit)) {
		/* PMFW query failed: fall back to the pptable defaults */
		if (smu->adev->pm.ac_power)
			limit = skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
		else
			limit = skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0];
	}

	if (current_power_limit)
		*current_power_limit = limit;
	if (default_power_limit)
		*default_power_limit = limit;

	if (max_power_limit)
		*max_power_limit =
			pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];

	if (min_power_limit)
		*min_power_limit = 0;

	return 0;
}
1682
/*
 * Print the activity-monitor coefficient table for every supported
 * power profile into the sysfs buffer @buf.
 *
 * For each profile that maps to a backend workload, the coefficients
 * are read back from the SMU and printed as one GFXCLK row and one
 * FCLK row; the currently active profile is marked with '*'.
 *
 * Returns the number of bytes written, -EINVAL on a NULL buffer or
 * mapping error, or the table-read error.
 */
static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu,
					      char *buf)
{
	DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
	DpmActivityMonitorCoeffInt_t *activity_monitor =
		&(activity_monitor_external.DpmActivityMonitorCoeffInt);
	static const char *title[] = {
			"PROFILE_INDEX(NAME)",
			"CLOCK_TYPE(NAME)",
			"FPS",
			"MinActiveFreqType",
			"MinActiveFreq",
			"BoosterFreqType",
			"BoosterFreq",
			"PD_Data_limit_c",
			"PD_Data_error_coeff",
			"PD_Data_error_rate_coeff"};
	int16_t workload_type = 0;
	uint32_t i, size = 0;
	int result = 0;

	if (!buf)
		return -EINVAL;

	size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s\n",
			title[0], title[1], title[2], title[3], title[4], title[5],
			title[6], title[7], title[8], title[9]);

	for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
		workload_type = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_WORKLOAD,
							       i);
		/* profiles without a backend workload are skipped silently */
		if (workload_type == -ENOTSUPP)
			continue;
		else if (workload_type < 0)
			return -EINVAL;

		result = smu_cmn_update_table(smu,
					      SMU_TABLE_ACTIVITY_MONITOR_COEFF,
					      workload_type,
					      (void *)(&activity_monitor_external),
					      false);
		if (result) {
			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
			return result;
		}

		size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
			i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");

		/* row 0: GFXCLK activity-monitor coefficients */
		size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			0,
			"GFXCLK",
			activity_monitor->Gfx_FPS,
			activity_monitor->Gfx_MinActiveFreqType,
			activity_monitor->Gfx_MinActiveFreq,
			activity_monitor->Gfx_BoosterFreqType,
			activity_monitor->Gfx_BoosterFreq,
			activity_monitor->Gfx_PD_Data_limit_c,
			activity_monitor->Gfx_PD_Data_error_coeff,
			activity_monitor->Gfx_PD_Data_error_rate_coeff);

		/* row 1: FCLK activity-monitor coefficients */
		size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			1,
			"FCLK",
			activity_monitor->Fclk_FPS,
			activity_monitor->Fclk_MinActiveFreqType,
			activity_monitor->Fclk_MinActiveFreq,
			activity_monitor->Fclk_BoosterFreqType,
			activity_monitor->Fclk_BoosterFreq,
			activity_monitor->Fclk_PD_Data_limit_c,
			activity_monitor->Fclk_PD_Data_error_coeff,
			activity_monitor->Fclk_PD_Data_error_rate_coeff);
	}

	return size;
}
1763
/*
 * Layout of the cached custom power-profile parameters: one group of
 * SMU_14_0_2_CUSTOM_PARAMS_COUNT longs per clock domain (element 0 is
 * the enable flag, elements 1-8 the activity-monitor coefficients),
 * with SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT domains (gfxclk, fclk).
 */
#define SMU_14_0_2_CUSTOM_PARAMS_COUNT 9
#define SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT 2
#define SMU_14_0_2_CUSTOM_PARAMS_SIZE (SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT * SMU_14_0_2_CUSTOM_PARAMS_COUNT * sizeof(long))
1767
smu_v14_0_2_set_power_profile_mode_coeff(struct smu_context * smu,long * input)1768 static int smu_v14_0_2_set_power_profile_mode_coeff(struct smu_context *smu,
1769 long *input)
1770 {
1771 DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
1772 DpmActivityMonitorCoeffInt_t *activity_monitor =
1773 &(activity_monitor_external.DpmActivityMonitorCoeffInt);
1774 int ret, idx;
1775
1776 ret = smu_cmn_update_table(smu,
1777 SMU_TABLE_ACTIVITY_MONITOR_COEFF,
1778 WORKLOAD_PPLIB_CUSTOM_BIT,
1779 (void *)(&activity_monitor_external),
1780 false);
1781 if (ret) {
1782 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
1783 return ret;
1784 }
1785
1786 idx = 0 * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
1787 if (input[idx]) {
1788 /* Gfxclk */
1789 activity_monitor->Gfx_FPS = input[idx + 1];
1790 activity_monitor->Gfx_MinActiveFreqType = input[idx + 2];
1791 activity_monitor->Gfx_MinActiveFreq = input[idx + 3];
1792 activity_monitor->Gfx_BoosterFreqType = input[idx + 4];
1793 activity_monitor->Gfx_BoosterFreq = input[idx + 5];
1794 activity_monitor->Gfx_PD_Data_limit_c = input[idx + 6];
1795 activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 7];
1796 activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 8];
1797 }
1798 idx = 1 * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
1799 if (input[idx]) {
1800 /* Fclk */
1801 activity_monitor->Fclk_FPS = input[idx + 1];
1802 activity_monitor->Fclk_MinActiveFreqType = input[idx + 2];
1803 activity_monitor->Fclk_MinActiveFreq = input[idx + 3];
1804 activity_monitor->Fclk_BoosterFreqType = input[idx + 4];
1805 activity_monitor->Fclk_BoosterFreq = input[idx + 5];
1806 activity_monitor->Fclk_PD_Data_limit_c = input[idx + 6];
1807 activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 7];
1808 activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 8];
1809 }
1810
1811 ret = smu_cmn_update_table(smu,
1812 SMU_TABLE_ACTIVITY_MONITOR_COEFF,
1813 WORKLOAD_PPLIB_CUSTOM_BIT,
1814 (void *)(&activity_monitor_external),
1815 true);
1816 if (ret) {
1817 dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
1818 return ret;
1819 }
1820
1821 return ret;
1822 }
1823
/*
 * Apply the requested power-profile workload mask.
 *
 * Deep sleep is toggled off whenever the COMPUTE profile is selected.
 * When the CUSTOM profile is part of the mask, the (lazily allocated)
 * cached custom parameters are updated from @custom_params and pushed
 * to the PMFW; on failure the enable flag written for this request is
 * rolled back (idx == -1 means no flag was written).  Finally the
 * translated backend workload mask is sent via SMU_MSG_SetWorkloadMask.
 *
 * Returns 0 on success or a negative errno.
 */
static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
					      u32 workload_mask,
					      long *custom_params,
					      u32 custom_params_max_idx)
{
	u32 backend_workload_mask = 0;
	int ret, idx = -1, i;

	smu_cmn_get_backend_workload_mask(smu, workload_mask,
					  &backend_workload_mask);

	/* disable deep sleep if compute is enabled */
	if (workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE))
		smu_v14_0_deep_sleep_control(smu, false);
	else
		smu_v14_0_deep_sleep_control(smu, true);

	if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
		/* Lazily allocate the cached custom-parameter buffer */
		if (!smu->custom_profile_params) {
			smu->custom_profile_params =
				kzalloc(SMU_14_0_2_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
			if (!smu->custom_profile_params)
				return -ENOMEM;
		}
		if (custom_params && custom_params_max_idx) {
			if (custom_params_max_idx != SMU_14_0_2_CUSTOM_PARAMS_COUNT)
				return -EINVAL;
			if (custom_params[0] >= SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT)
				return -EINVAL;
			/* custom_params[0] selects the clock group; set its
			 * enable flag and copy the coefficients behind it */
			idx = custom_params[0] * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
			smu->custom_profile_params[idx] = 1;
			for (i = 1; i < custom_params_max_idx; i++)
				smu->custom_profile_params[idx + i] = custom_params[i];
		}
		ret = smu_v14_0_2_set_power_profile_mode_coeff(smu,
							       smu->custom_profile_params);
		if (ret) {
			/* roll back the enable flag written above */
			if (idx != -1)
				smu->custom_profile_params[idx] = 0;
			return ret;
		}
	} else if (smu->custom_profile_params) {
		/* leaving CUSTOM: drop any cached parameters */
		memset(smu->custom_profile_params, 0, SMU_14_0_2_CUSTOM_PARAMS_SIZE);
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
					      backend_workload_mask, NULL);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
			workload_mask);
		/* roll back the enable flag written above */
		if (idx != -1)
			smu->custom_profile_params[idx] = 0;
		return ret;
	}

	return ret;
}
1881
smu_v14_0_2_baco_enter(struct smu_context * smu)1882 static int smu_v14_0_2_baco_enter(struct smu_context *smu)
1883 {
1884 struct smu_baco_context *smu_baco = &smu->smu_baco;
1885 struct amdgpu_device *adev = smu->adev;
1886
1887 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
1888 return smu_v14_0_baco_set_armd3_sequence(smu,
1889 smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO);
1890 else
1891 return smu_v14_0_baco_enter(smu);
1892 }
1893
smu_v14_0_2_baco_exit(struct smu_context * smu)1894 static int smu_v14_0_2_baco_exit(struct smu_context *smu)
1895 {
1896 struct amdgpu_device *adev = smu->adev;
1897
1898 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
1899 /* Wait for PMFW handling for the Dstate change */
1900 usleep_range(10000, 11000);
1901 return smu_v14_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
1902 } else {
1903 return smu_v14_0_baco_exit(smu);
1904 }
1905 }
1906
smu_v14_0_2_is_mode1_reset_supported(struct smu_context * smu)1907 static bool smu_v14_0_2_is_mode1_reset_supported(struct smu_context *smu)
1908 {
1909 // TODO
1910
1911 return true;
1912 }
1913
smu_v14_0_2_i2c_xfer(struct i2c_adapter * i2c_adap,struct i2c_msg * msg,int num_msgs)1914 static int smu_v14_0_2_i2c_xfer(struct i2c_adapter *i2c_adap,
1915 struct i2c_msg *msg, int num_msgs)
1916 {
1917 struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
1918 struct amdgpu_device *adev = smu_i2c->adev;
1919 struct smu_context *smu = adev->powerplay.pp_handle;
1920 struct smu_table_context *smu_table = &smu->smu_table;
1921 struct smu_table *table = &smu_table->driver_table;
1922 SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
1923 int i, j, r, c;
1924 u16 dir;
1925
1926 if (!adev->pm.dpm_enabled)
1927 return -EBUSY;
1928
1929 req = kzalloc(sizeof(*req), GFP_KERNEL);
1930 if (!req)
1931 return -ENOMEM;
1932
1933 req->I2CcontrollerPort = smu_i2c->port;
1934 req->I2CSpeed = I2C_SPEED_FAST_400K;
1935 req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
1936 dir = msg[0].flags & I2C_M_RD;
1937
1938 for (c = i = 0; i < num_msgs; i++) {
1939 for (j = 0; j < msg[i].len; j++, c++) {
1940 SwI2cCmd_t *cmd = &req->SwI2cCmds[c];
1941
1942 if (!(msg[i].flags & I2C_M_RD)) {
1943 /* write */
1944 cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
1945 cmd->ReadWriteData = msg[i].buf[j];
1946 }
1947
1948 if ((dir ^ msg[i].flags) & I2C_M_RD) {
1949 /* The direction changes.
1950 */
1951 dir = msg[i].flags & I2C_M_RD;
1952 cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
1953 }
1954
1955 req->NumCmds++;
1956
1957 /*
1958 * Insert STOP if we are at the last byte of either last
1959 * message for the transaction or the client explicitly
1960 * requires a STOP at this particular message.
1961 */
1962 if ((j == msg[i].len - 1) &&
1963 ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
1964 cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
1965 cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
1966 }
1967 }
1968 }
1969 mutex_lock(&adev->pm.mutex);
1970 r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
1971 mutex_unlock(&adev->pm.mutex);
1972 if (r)
1973 goto fail;
1974
1975 for (c = i = 0; i < num_msgs; i++) {
1976 if (!(msg[i].flags & I2C_M_RD)) {
1977 c += msg[i].len;
1978 continue;
1979 }
1980 for (j = 0; j < msg[i].len; j++, c++) {
1981 SwI2cCmd_t *cmd = &res->SwI2cCmds[c];
1982
1983 msg[i].buf[j] = cmd->ReadWriteData;
1984 }
1985 }
1986 r = num_msgs;
1987 fail:
1988 kfree(req);
1989 return r;
1990 }
1991
smu_v14_0_2_i2c_func(struct i2c_adapter * adap)1992 static u32 smu_v14_0_2_i2c_func(struct i2c_adapter *adap)
1993 {
1994 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1995 }
1996
/* Algorithm hooks backing the SMU-owned software I2C buses */
static const struct i2c_algorithm smu_v14_0_2_i2c_algo = {
	.master_xfer = smu_v14_0_2_i2c_xfer,
	.functionality = smu_v14_0_2_i2c_func,
};
2001
/* Transfer-size limits imposed by the fixed SwI2cRequest_t command list */
static const struct i2c_adapter_quirks smu_v14_0_2_i2c_control_quirks = {
	.flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
	.max_read_len = MAX_SW_I2C_COMMANDS,
	.max_write_len = MAX_SW_I2C_COMMANDS,
	.max_comb_1st_msg_len = 2,
	.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
};
2009
smu_v14_0_2_i2c_control_init(struct smu_context * smu)2010 static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
2011 {
2012 struct amdgpu_device *adev = smu->adev;
2013 int res, i;
2014
2015 for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
2016 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
2017 struct i2c_adapter *control = &smu_i2c->adapter;
2018
2019 smu_i2c->adev = adev;
2020 smu_i2c->port = i;
2021 mutex_init(&smu_i2c->mutex);
2022 control->owner = THIS_MODULE;
2023 control->dev.parent = &adev->pdev->dev;
2024 control->algo = &smu_v14_0_2_i2c_algo;
2025 snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
2026 control->quirks = &smu_v14_0_2_i2c_control_quirks;
2027 i2c_set_adapdata(control, smu_i2c);
2028
2029 res = i2c_add_adapter(control);
2030 if (res) {
2031 DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
2032 goto Out_err;
2033 }
2034 }
2035
2036 /* assign the buses used for the FRU EEPROM and RAS EEPROM */
2037 /* XXX ideally this would be something in a vbios data table */
2038 adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
2039 adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
2040
2041 return 0;
2042 Out_err:
2043 for ( ; i >= 0; i--) {
2044 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
2045 struct i2c_adapter *control = &smu_i2c->adapter;
2046
2047 i2c_del_adapter(control);
2048 }
2049 return res;
2050 }
2051
smu_v14_0_2_i2c_control_fini(struct smu_context * smu)2052 static void smu_v14_0_2_i2c_control_fini(struct smu_context *smu)
2053 {
2054 struct amdgpu_device *adev = smu->adev;
2055 int i;
2056
2057 for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
2058 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
2059 struct i2c_adapter *control = &smu_i2c->adapter;
2060
2061 i2c_del_adapter(control);
2062 }
2063 adev->pm.ras_eeprom_i2c_bus = NULL;
2064 adev->pm.fru_eeprom_i2c_bus = NULL;
2065 }
2066
smu_v14_0_2_set_mp1_state(struct smu_context * smu,enum pp_mp1_state mp1_state)2067 static int smu_v14_0_2_set_mp1_state(struct smu_context *smu,
2068 enum pp_mp1_state mp1_state)
2069 {
2070 int ret;
2071
2072 switch (mp1_state) {
2073 case PP_MP1_STATE_UNLOAD:
2074 ret = smu_cmn_set_mp1_state(smu, mp1_state);
2075 break;
2076 default:
2077 /* Ignore others */
2078 ret = 0;
2079 }
2080
2081 return ret;
2082 }
2083
smu_v14_0_2_set_df_cstate(struct smu_context * smu,enum pp_df_cstate state)2084 static int smu_v14_0_2_set_df_cstate(struct smu_context *smu,
2085 enum pp_df_cstate state)
2086 {
2087 return smu_cmn_send_smc_msg_with_param(smu,
2088 SMU_MSG_DFCstateControl,
2089 state,
2090 NULL);
2091 }
2092
smu_v14_0_2_mode1_reset(struct smu_context * smu)2093 static int smu_v14_0_2_mode1_reset(struct smu_context *smu)
2094 {
2095 int ret = 0;
2096
2097 ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset);
2098 if (!ret) {
2099 if (amdgpu_emu_mode == 1)
2100 msleep(50000);
2101 else
2102 msleep(1000);
2103 }
2104
2105 return ret;
2106 }
2107
smu_v14_0_2_mode2_reset(struct smu_context * smu)2108 static int smu_v14_0_2_mode2_reset(struct smu_context *smu)
2109 {
2110 int ret = 0;
2111
2112 // TODO
2113
2114 return ret;
2115 }
2116
smu_v14_0_2_enable_gfx_features(struct smu_context * smu)2117 static int smu_v14_0_2_enable_gfx_features(struct smu_context *smu)
2118 {
2119 struct amdgpu_device *adev = smu->adev;
2120
2121 if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2))
2122 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures,
2123 FEATURE_PWR_GFX, NULL);
2124 else
2125 return -EOPNOTSUPP;
2126 }
2127
smu_v14_0_2_set_smu_mailbox_registers(struct smu_context * smu)2128 static void smu_v14_0_2_set_smu_mailbox_registers(struct smu_context *smu)
2129 {
2130 struct amdgpu_device *adev = smu->adev;
2131
2132 smu->param_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_82);
2133 smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_66);
2134 smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_90);
2135
2136 smu->debug_param_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_53);
2137 smu->debug_msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_75);
2138 smu->debug_resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_54);
2139 }
2140
smu_v14_0_2_get_gpu_metrics(struct smu_context * smu,void ** table)2141 static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,
2142 void **table)
2143 {
2144 struct smu_table_context *smu_table = &smu->smu_table;
2145 struct gpu_metrics_v1_3 *gpu_metrics =
2146 (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
2147 SmuMetricsExternal_t metrics_ext;
2148 SmuMetrics_t *metrics = &metrics_ext.SmuMetrics;
2149 int ret = 0;
2150
2151 ret = smu_cmn_get_metrics_table(smu,
2152 &metrics_ext,
2153 true);
2154 if (ret)
2155 return ret;
2156
2157 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
2158
2159 gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE];
2160 gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT];
2161 gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM];
2162 gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX];
2163 gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC];
2164 gpu_metrics->temperature_vrmem = max(metrics->AvgTemperature[TEMP_VR_MEM0],
2165 metrics->AvgTemperature[TEMP_VR_MEM1]);
2166
2167 gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
2168 gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
2169 gpu_metrics->average_mm_activity = max(metrics->AverageVcn0ActivityPercentage,
2170 metrics->Vcn1ActivityPercentage);
2171
2172 gpu_metrics->average_socket_power = metrics->AverageSocketPower;
2173 gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;
2174
2175 if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD)
2176 gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
2177 else
2178 gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;
2179
2180 if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
2181 gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs;
2182 else
2183 gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs;
2184
2185 gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency;
2186 gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency;
2187 gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
2188 gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
2189
2190 gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency;
2191 gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
2192 gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
2193 gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
2194 gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
2195 gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_0];
2196 gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_0];
2197
2198 gpu_metrics->throttle_status =
2199 smu_v14_0_2_get_throttler_status(metrics);
2200 gpu_metrics->indep_throttle_status =
2201 smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
2202 smu_v14_0_2_throttler_map);
2203
2204 gpu_metrics->current_fan_speed = metrics->AvgFanRpm;
2205
2206 gpu_metrics->pcie_link_width = metrics->PcieWidth;
2207 if ((metrics->PcieRate - 1) > LINK_SPEED_MAX)
2208 gpu_metrics->pcie_link_speed = pcie_gen_to_speed(1);
2209 else
2210 gpu_metrics->pcie_link_speed = pcie_gen_to_speed(metrics->PcieRate);
2211
2212 gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
2213
2214 gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_VDD_GFX];
2215 gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_VDD_SOC];
2216 gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VDDIO_MEM];
2217
2218 *table = (void *)gpu_metrics;
2219
2220 return sizeof(struct gpu_metrics_v1_3);
2221 }
2222
smu_v14_0_2_dump_od_table(struct smu_context * smu,OverDriveTableExternal_t * od_table)2223 static void smu_v14_0_2_dump_od_table(struct smu_context *smu,
2224 OverDriveTableExternal_t *od_table)
2225 {
2226 struct amdgpu_device *adev = smu->adev;
2227
2228 dev_dbg(adev->dev, "OD: Gfxclk offset: (%d)\n", od_table->OverDriveTable.GfxclkFoffset);
2229 dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin,
2230 od_table->OverDriveTable.UclkFmax);
2231 }
2232
smu_v14_0_2_upload_overdrive_table(struct smu_context * smu,OverDriveTableExternal_t * od_table)2233 static int smu_v14_0_2_upload_overdrive_table(struct smu_context *smu,
2234 OverDriveTableExternal_t *od_table)
2235 {
2236 int ret;
2237 ret = smu_cmn_update_table(smu,
2238 SMU_TABLE_OVERDRIVE,
2239 0,
2240 (void *)od_table,
2241 true);
2242 if (ret)
2243 dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
2244
2245 return ret;
2246 }
2247
smu_v14_0_2_set_supported_od_feature_mask(struct smu_context * smu)2248 static void smu_v14_0_2_set_supported_od_feature_mask(struct smu_context *smu)
2249 {
2250 struct amdgpu_device *adev = smu->adev;
2251
2252 if (smu_v14_0_2_is_od_feature_supported(smu,
2253 PP_OD_FEATURE_FAN_CURVE_BIT))
2254 adev->pm.od_feature_mask |= OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE |
2255 OD_OPS_SUPPORT_FAN_CURVE_SET |
2256 OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE |
2257 OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET |
2258 OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE |
2259 OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET |
2260 OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE |
2261 OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET |
2262 OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE |
2263 OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET;
2264 }
2265
smu_v14_0_2_get_overdrive_table(struct smu_context * smu,OverDriveTableExternal_t * od_table)2266 static int smu_v14_0_2_get_overdrive_table(struct smu_context *smu,
2267 OverDriveTableExternal_t *od_table)
2268 {
2269 int ret;
2270 ret = smu_cmn_update_table(smu,
2271 SMU_TABLE_OVERDRIVE,
2272 0,
2273 (void *)od_table,
2274 false);
2275 if (ret)
2276 dev_err(smu->adev->dev, "Failed to get overdrive table!\n");
2277
2278 return ret;
2279 }
2280
smu_v14_0_2_set_default_od_settings(struct smu_context * smu)2281 static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu)
2282 {
2283 OverDriveTableExternal_t *od_table =
2284 (OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
2285 OverDriveTableExternal_t *boot_od_table =
2286 (OverDriveTableExternal_t *)smu->smu_table.boot_overdrive_table;
2287 OverDriveTableExternal_t *user_od_table =
2288 (OverDriveTableExternal_t *)smu->smu_table.user_overdrive_table;
2289 OverDriveTableExternal_t user_od_table_bak;
2290 int ret;
2291 int i;
2292
2293 ret = smu_v14_0_2_get_overdrive_table(smu, boot_od_table);
2294 if (ret)
2295 return ret;
2296
2297 smu_v14_0_2_dump_od_table(smu, boot_od_table);
2298
2299 memcpy(od_table,
2300 boot_od_table,
2301 sizeof(OverDriveTableExternal_t));
2302
2303 /*
2304 * For S3/S4/Runpm resume, we need to setup those overdrive tables again,
2305 * but we have to preserve user defined values in "user_od_table".
2306 */
2307 if (!smu->adev->in_suspend) {
2308 memcpy(user_od_table,
2309 boot_od_table,
2310 sizeof(OverDriveTableExternal_t));
2311 smu->user_dpm_profile.user_od = false;
2312 } else if (smu->user_dpm_profile.user_od) {
2313 memcpy(&user_od_table_bak,
2314 user_od_table,
2315 sizeof(OverDriveTableExternal_t));
2316 memcpy(user_od_table,
2317 boot_od_table,
2318 sizeof(OverDriveTableExternal_t));
2319 user_od_table->OverDriveTable.GfxclkFoffset =
2320 user_od_table_bak.OverDriveTable.GfxclkFoffset;
2321 user_od_table->OverDriveTable.UclkFmin =
2322 user_od_table_bak.OverDriveTable.UclkFmin;
2323 user_od_table->OverDriveTable.UclkFmax =
2324 user_od_table_bak.OverDriveTable.UclkFmax;
2325 for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
2326 user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] =
2327 user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i];
2328 for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++) {
2329 user_od_table->OverDriveTable.FanLinearTempPoints[i] =
2330 user_od_table_bak.OverDriveTable.FanLinearTempPoints[i];
2331 user_od_table->OverDriveTable.FanLinearPwmPoints[i] =
2332 user_od_table_bak.OverDriveTable.FanLinearPwmPoints[i];
2333 }
2334 user_od_table->OverDriveTable.AcousticLimitRpmThreshold =
2335 user_od_table_bak.OverDriveTable.AcousticLimitRpmThreshold;
2336 user_od_table->OverDriveTable.AcousticTargetRpmThreshold =
2337 user_od_table_bak.OverDriveTable.AcousticTargetRpmThreshold;
2338 user_od_table->OverDriveTable.FanTargetTemperature =
2339 user_od_table_bak.OverDriveTable.FanTargetTemperature;
2340 user_od_table->OverDriveTable.FanMinimumPwm =
2341 user_od_table_bak.OverDriveTable.FanMinimumPwm;
2342 }
2343
2344 smu_v14_0_2_set_supported_od_feature_mask(smu);
2345
2346 return 0;
2347 }
2348
smu_v14_0_2_restore_user_od_settings(struct smu_context * smu)2349 static int smu_v14_0_2_restore_user_od_settings(struct smu_context *smu)
2350 {
2351 struct smu_table_context *table_context = &smu->smu_table;
2352 OverDriveTableExternal_t *od_table = table_context->overdrive_table;
2353 OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table;
2354 int res;
2355
2356 user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
2357 BIT(PP_OD_FEATURE_UCLK_BIT) |
2358 BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
2359 BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2360 res = smu_v14_0_2_upload_overdrive_table(smu, user_od_table);
2361 user_od_table->OverDriveTable.FeatureCtrlMask = 0;
2362 if (res == 0)
2363 memcpy(od_table, user_od_table, sizeof(OverDriveTableExternal_t));
2364
2365 return res;
2366 }
2367
smu_v14_0_2_od_restore_table_single(struct smu_context * smu,long input)2368 static int smu_v14_0_2_od_restore_table_single(struct smu_context *smu, long input)
2369 {
2370 struct smu_table_context *table_context = &smu->smu_table;
2371 OverDriveTableExternal_t *boot_overdrive_table =
2372 (OverDriveTableExternal_t *)table_context->boot_overdrive_table;
2373 OverDriveTableExternal_t *od_table =
2374 (OverDriveTableExternal_t *)table_context->overdrive_table;
2375 struct amdgpu_device *adev = smu->adev;
2376 int i;
2377
2378 switch (input) {
2379 case PP_OD_EDIT_FAN_CURVE:
2380 for (i = 0; i < NUM_OD_FAN_MAX_POINTS; i++) {
2381 od_table->OverDriveTable.FanLinearTempPoints[i] =
2382 boot_overdrive_table->OverDriveTable.FanLinearTempPoints[i];
2383 od_table->OverDriveTable.FanLinearPwmPoints[i] =
2384 boot_overdrive_table->OverDriveTable.FanLinearPwmPoints[i];
2385 }
2386 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2387 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2388 break;
2389 case PP_OD_EDIT_ACOUSTIC_LIMIT:
2390 od_table->OverDriveTable.AcousticLimitRpmThreshold =
2391 boot_overdrive_table->OverDriveTable.AcousticLimitRpmThreshold;
2392 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2393 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2394 break;
2395 case PP_OD_EDIT_ACOUSTIC_TARGET:
2396 od_table->OverDriveTable.AcousticTargetRpmThreshold =
2397 boot_overdrive_table->OverDriveTable.AcousticTargetRpmThreshold;
2398 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2399 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2400 break;
2401 case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
2402 od_table->OverDriveTable.FanTargetTemperature =
2403 boot_overdrive_table->OverDriveTable.FanTargetTemperature;
2404 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2405 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2406 break;
2407 case PP_OD_EDIT_FAN_MINIMUM_PWM:
2408 od_table->OverDriveTable.FanMinimumPwm =
2409 boot_overdrive_table->OverDriveTable.FanMinimumPwm;
2410 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2411 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2412 break;
2413 default:
2414 dev_info(adev->dev, "Invalid table index: %ld\n", input);
2415 return -EINVAL;
2416 }
2417
2418 return 0;
2419 }
2420
smu_v14_0_2_od_edit_dpm_table(struct smu_context * smu,enum PP_OD_DPM_TABLE_COMMAND type,long input[],uint32_t size)2421 static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
2422 enum PP_OD_DPM_TABLE_COMMAND type,
2423 long input[],
2424 uint32_t size)
2425 {
2426 struct smu_table_context *table_context = &smu->smu_table;
2427 OverDriveTableExternal_t *od_table =
2428 (OverDriveTableExternal_t *)table_context->overdrive_table;
2429 struct amdgpu_device *adev = smu->adev;
2430 uint32_t offset_of_voltageoffset;
2431 int32_t minimum, maximum;
2432 uint32_t feature_ctrlmask;
2433 int i, ret = 0;
2434
2435 switch (type) {
2436 case PP_OD_EDIT_SCLK_VDDC_TABLE:
2437 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
2438 dev_warn(adev->dev, "GFXCLK_LIMITS setting not supported!\n");
2439 return -ENOTSUPP;
2440 }
2441
2442 if (size != 1) {
2443 dev_info(adev->dev, "invalid number of input parameters %d\n", size);
2444 return -EINVAL;
2445 }
2446
2447 smu_v14_0_2_get_od_setting_limits(smu,
2448 PP_OD_FEATURE_GFXCLK_FMAX,
2449 &minimum,
2450 &maximum);
2451 if (input[0] < minimum ||
2452 input[0] > maximum) {
2453 dev_info(adev->dev, "GfxclkFoffset must be within [%d, %u]!\n",
2454 minimum, maximum);
2455 return -EINVAL;
2456 }
2457
2458 od_table->OverDriveTable.GfxclkFoffset = input[0];
2459 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
2460 break;
2461
2462 case PP_OD_EDIT_MCLK_VDDC_TABLE:
2463 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
2464 dev_warn(adev->dev, "UCLK_LIMITS setting not supported!\n");
2465 return -ENOTSUPP;
2466 }
2467
2468 for (i = 0; i < size; i += 2) {
2469 if (i + 2 > size) {
2470 dev_info(adev->dev, "invalid number of input parameters %d\n", size);
2471 return -EINVAL;
2472 }
2473
2474 switch (input[i]) {
2475 case 0:
2476 smu_v14_0_2_get_od_setting_limits(smu,
2477 PP_OD_FEATURE_UCLK_FMIN,
2478 &minimum,
2479 &maximum);
2480 if (input[i + 1] < minimum ||
2481 input[i + 1] > maximum) {
2482 dev_info(adev->dev, "UclkFmin (%ld) must be within [%u, %u]!\n",
2483 input[i + 1], minimum, maximum);
2484 return -EINVAL;
2485 }
2486
2487 od_table->OverDriveTable.UclkFmin = input[i + 1];
2488 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
2489 break;
2490
2491 case 1:
2492 smu_v14_0_2_get_od_setting_limits(smu,
2493 PP_OD_FEATURE_UCLK_FMAX,
2494 &minimum,
2495 &maximum);
2496 if (input[i + 1] < minimum ||
2497 input[i + 1] > maximum) {
2498 dev_info(adev->dev, "UclkFmax (%ld) must be within [%u, %u]!\n",
2499 input[i + 1], minimum, maximum);
2500 return -EINVAL;
2501 }
2502
2503 od_table->OverDriveTable.UclkFmax = input[i + 1];
2504 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
2505 break;
2506
2507 default:
2508 dev_info(adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]);
2509 dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
2510 return -EINVAL;
2511 }
2512 }
2513
2514 if (od_table->OverDriveTable.UclkFmin > od_table->OverDriveTable.UclkFmax) {
2515 dev_err(adev->dev,
2516 "Invalid setting: UclkFmin(%u) is bigger than UclkFmax(%u)\n",
2517 (uint32_t)od_table->OverDriveTable.UclkFmin,
2518 (uint32_t)od_table->OverDriveTable.UclkFmax);
2519 return -EINVAL;
2520 }
2521 break;
2522
2523 case PP_OD_EDIT_VDDGFX_OFFSET:
2524 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
2525 dev_warn(adev->dev, "Gfx offset setting not supported!\n");
2526 return -ENOTSUPP;
2527 }
2528
2529 smu_v14_0_2_get_od_setting_limits(smu,
2530 PP_OD_FEATURE_GFX_VF_CURVE,
2531 &minimum,
2532 &maximum);
2533 if (input[0] < minimum ||
2534 input[0] > maximum) {
2535 dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n",
2536 input[0], minimum, maximum);
2537 return -EINVAL;
2538 }
2539
2540 for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
2541 od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = input[0];
2542 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT);
2543 break;
2544
2545 case PP_OD_EDIT_FAN_CURVE:
2546 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
2547 dev_warn(adev->dev, "Fan curve setting not supported!\n");
2548 return -ENOTSUPP;
2549 }
2550
2551 if (input[0] >= NUM_OD_FAN_MAX_POINTS - 1 ||
2552 input[0] < 0)
2553 return -EINVAL;
2554
2555 smu_v14_0_2_get_od_setting_limits(smu,
2556 PP_OD_FEATURE_FAN_CURVE_TEMP,
2557 &minimum,
2558 &maximum);
2559 if (input[1] < minimum ||
2560 input[1] > maximum) {
2561 dev_info(adev->dev, "Fan curve temp setting(%ld) must be within [%d, %d]!\n",
2562 input[1], minimum, maximum);
2563 return -EINVAL;
2564 }
2565
2566 smu_v14_0_2_get_od_setting_limits(smu,
2567 PP_OD_FEATURE_FAN_CURVE_PWM,
2568 &minimum,
2569 &maximum);
2570 if (input[2] < minimum ||
2571 input[2] > maximum) {
2572 dev_info(adev->dev, "Fan curve pwm setting(%ld) must be within [%d, %d]!\n",
2573 input[2], minimum, maximum);
2574 return -EINVAL;
2575 }
2576
2577 od_table->OverDriveTable.FanLinearTempPoints[input[0]] = input[1];
2578 od_table->OverDriveTable.FanLinearPwmPoints[input[0]] = input[2];
2579 od_table->OverDriveTable.FanMode = FAN_MODE_MANUAL_LINEAR;
2580 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2581 break;
2582
2583 case PP_OD_EDIT_ACOUSTIC_LIMIT:
2584 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
2585 dev_warn(adev->dev, "Fan curve setting not supported!\n");
2586 return -ENOTSUPP;
2587 }
2588
2589 smu_v14_0_2_get_od_setting_limits(smu,
2590 PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
2591 &minimum,
2592 &maximum);
2593 if (input[0] < minimum ||
2594 input[0] > maximum) {
2595 dev_info(adev->dev, "acoustic limit threshold setting(%ld) must be within [%d, %d]!\n",
2596 input[0], minimum, maximum);
2597 return -EINVAL;
2598 }
2599
2600 od_table->OverDriveTable.AcousticLimitRpmThreshold = input[0];
2601 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2602 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2603 break;
2604
2605 case PP_OD_EDIT_ACOUSTIC_TARGET:
2606 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
2607 dev_warn(adev->dev, "Fan curve setting not supported!\n");
2608 return -ENOTSUPP;
2609 }
2610
2611 smu_v14_0_2_get_od_setting_limits(smu,
2612 PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
2613 &minimum,
2614 &maximum);
2615 if (input[0] < minimum ||
2616 input[0] > maximum) {
2617 dev_info(adev->dev, "acoustic target threshold setting(%ld) must be within [%d, %d]!\n",
2618 input[0], minimum, maximum);
2619 return -EINVAL;
2620 }
2621
2622 od_table->OverDriveTable.AcousticTargetRpmThreshold = input[0];
2623 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2624 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2625 break;
2626
2627 case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
2628 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
2629 dev_warn(adev->dev, "Fan curve setting not supported!\n");
2630 return -ENOTSUPP;
2631 }
2632
2633 smu_v14_0_2_get_od_setting_limits(smu,
2634 PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
2635 &minimum,
2636 &maximum);
2637 if (input[0] < minimum ||
2638 input[0] > maximum) {
2639 dev_info(adev->dev, "fan target temperature setting(%ld) must be within [%d, %d]!\n",
2640 input[0], minimum, maximum);
2641 return -EINVAL;
2642 }
2643
2644 od_table->OverDriveTable.FanTargetTemperature = input[0];
2645 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2646 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2647 break;
2648
2649 case PP_OD_EDIT_FAN_MINIMUM_PWM:
2650 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
2651 dev_warn(adev->dev, "Fan curve setting not supported!\n");
2652 return -ENOTSUPP;
2653 }
2654
2655 smu_v14_0_2_get_od_setting_limits(smu,
2656 PP_OD_FEATURE_FAN_MINIMUM_PWM,
2657 &minimum,
2658 &maximum);
2659 if (input[0] < minimum ||
2660 input[0] > maximum) {
2661 dev_info(adev->dev, "fan minimum pwm setting(%ld) must be within [%d, %d]!\n",
2662 input[0], minimum, maximum);
2663 return -EINVAL;
2664 }
2665
2666 od_table->OverDriveTable.FanMinimumPwm = input[0];
2667 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2668 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2669 break;
2670
2671 case PP_OD_RESTORE_DEFAULT_TABLE:
2672 if (size == 1) {
2673 ret = smu_v14_0_2_od_restore_table_single(smu, input[0]);
2674 if (ret)
2675 return ret;
2676 } else {
2677 feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask;
2678 memcpy(od_table,
2679 table_context->boot_overdrive_table,
2680 sizeof(OverDriveTableExternal_t));
2681 od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask;
2682 }
2683 fallthrough;
2684 case PP_OD_COMMIT_DPM_TABLE:
2685 /*
2686 * The member below instructs PMFW the settings focused in
2687 * this single operation.
2688 * `uint32_t FeatureCtrlMask;`
2689 * It does not contain actual informations about user's custom
2690 * settings. Thus we do not cache it.
2691 */
2692 offset_of_voltageoffset = offsetof(OverDriveTable_t, VoltageOffsetPerZoneBoundary);
2693 if (memcmp((u8 *)od_table + offset_of_voltageoffset,
2694 table_context->user_overdrive_table + offset_of_voltageoffset,
2695 sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset)) {
2696 smu_v14_0_2_dump_od_table(smu, od_table);
2697
2698 ret = smu_v14_0_2_upload_overdrive_table(smu, od_table);
2699 if (ret) {
2700 dev_err(adev->dev, "Failed to upload overdrive table!\n");
2701 return ret;
2702 }
2703
2704 od_table->OverDriveTable.FeatureCtrlMask = 0;
2705 memcpy(table_context->user_overdrive_table + offset_of_voltageoffset,
2706 (u8 *)od_table + offset_of_voltageoffset,
2707 sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset);
2708
2709 if (!memcmp(table_context->user_overdrive_table,
2710 table_context->boot_overdrive_table,
2711 sizeof(OverDriveTableExternal_t)))
2712 smu->user_dpm_profile.user_od = false;
2713 else
2714 smu->user_dpm_profile.user_od = true;
2715 }
2716 break;
2717
2718 default:
2719 return -ENOSYS;
2720 }
2721
2722 return ret;
2723 }
2724
/*
 * Program a new power (PPT) limit.
 *
 * Limits at or below the SKU's stock AC PPT0 ceiling (msg_limit) are
 * applied directly through the generic v14.0 helper; anything above the
 * ceiling requires overdrive and is expressed to PMFW as a percentage
 * offset in the overdrive table.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int smu_v14_0_2_set_power_limit(struct smu_context *smu,
				       enum smu_ppt_limit_type limit_type,
				       uint32_t limit)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
	struct smu_table_context *table_context = &smu->smu_table;
	OverDriveTableExternal_t *od_table =
		(OverDriveTableExternal_t *)table_context->overdrive_table;
	int ret;

	/* Only the default PPT limit is supported here. */
	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
		return -EINVAL;

	if (limit > msg_limit) {
		/* Above-ceiling limits are only reachable via overdrive. */
		if (!smu->od_enabled)
			return -EINVAL;

		/* Pin the message-based limit at the ceiling ... */
		ret = smu_v14_0_set_power_limit(smu, limit_type, msg_limit);
		if (ret)
			return ret;

		/* ... and carry the excess as a percentage OD offset. */
		od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
		od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;

		ret = smu_v14_0_2_upload_overdrive_table(smu, od_table);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
			return ret;
		}

		smu->current_power_limit = limit;
		return 0;
	}

	/*
	 * Coming back down from an OD-boosted limit: clear the OD offset
	 * before applying the in-range limit.
	 */
	if (smu->current_power_limit > msg_limit) {
		od_table->OverDriveTable.Ppt = 0;
		od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;

		ret = smu_v14_0_2_upload_overdrive_table(smu, od_table);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
			return ret;
		}
	}

	return smu_v14_0_set_power_limit(smu, limit_type, limit);
}
2772
/*
 * SMU v14.0.2 implementation of the common pptable_funcs dispatch table.
 * Entries mix ASIC-specific handlers (smu_v14_0_2_*), generation-common
 * helpers (smu_v14_0_*), and shared code (smu_cmn_*).
 */
static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
	/* feature masks and DPM tables */
	.get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask,
	.set_default_dpm_table = smu_v14_0_2_set_default_dpm_table,
	/* i2c bus control */
	.i2c_init = smu_v14_0_2_i2c_control_init,
	.i2c_fini = smu_v14_0_2_i2c_control_fini,
	.is_dpm_running = smu_v14_0_2_is_dpm_running,
	/* microcode / SMC table lifecycle */
	.init_microcode = smu_v14_0_init_microcode,
	.load_microcode = smu_v14_0_load_microcode,
	.fini_microcode = smu_v14_0_fini_microcode,
	.init_smc_tables = smu_v14_0_2_init_smc_tables,
	.fini_smc_tables = smu_v14_0_fini_smc_tables,
	.init_power = smu_v14_0_init_power,
	.fini_power = smu_v14_0_fini_power,
	.check_fw_status = smu_v14_0_check_fw_status,
	.setup_pptable = smu_v14_0_2_setup_pptable,
	.check_fw_version = smu_v14_0_check_fw_version,
	.set_driver_table_location = smu_v14_0_set_driver_table_location,
	.system_features_control = smu_v14_0_system_features_control,
	.set_allowed_mask = smu_v14_0_set_allowed_mask,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	/* multimedia engine power gating */
	.dpm_set_vcn_enable = smu_v14_0_set_vcn_enable,
	.dpm_set_jpeg_enable = smu_v14_0_set_jpeg_enable,
	.get_dpm_ultimate_freq = smu_v14_0_2_get_dpm_ultimate_freq,
	.get_vbios_bootup_values = smu_v14_0_get_vbios_bootup_values,
	.read_sensor = smu_v14_0_2_read_sensor,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	/* clock level reporting / forcing (sysfs pp_dpm_* interfaces) */
	.print_clk_levels = smu_v14_0_2_print_clk_levels,
	.force_clk_levels = smu_v14_0_2_force_clk_levels,
	.update_pcie_parameters = smu_v14_0_2_update_pcie_parameters,
	/* thermal */
	.get_thermal_temperature_range = smu_v14_0_2_get_thermal_temperature_range,
	.register_irq_handler = smu_v14_0_register_irq_handler,
	.enable_thermal_alert = smu_v14_0_enable_thermal_alert,
	.disable_thermal_alert = smu_v14_0_disable_thermal_alert,
	.notify_memory_pool_location = smu_v14_0_notify_memory_pool_location,
	.get_gpu_metrics = smu_v14_0_2_get_gpu_metrics,
	.set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range,
	/* overdrive (OD) settings */
	.set_default_od_settings = smu_v14_0_2_set_default_od_settings,
	.restore_user_od_settings = smu_v14_0_2_restore_user_od_settings,
	.od_edit_dpm_table = smu_v14_0_2_od_edit_dpm_table,
	.init_pptable_microcode = smu_v14_0_init_pptable_microcode,
	.populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk,
	.set_performance_level = smu_v14_0_set_performance_level,
	.gfx_off_control = smu_v14_0_gfx_off_control,
	.get_unique_id = smu_v14_0_2_get_unique_id,
	/* fan and power limit control */
	.get_fan_speed_pwm = smu_v14_0_2_get_fan_speed_pwm,
	.get_fan_speed_rpm = smu_v14_0_2_get_fan_speed_rpm,
	.get_power_limit = smu_v14_0_2_get_power_limit,
	.set_power_limit = smu_v14_0_2_set_power_limit,
	.get_power_profile_mode = smu_v14_0_2_get_power_profile_mode,
	.set_power_profile_mode = smu_v14_0_2_set_power_profile_mode,
	.run_btc = smu_v14_0_run_btc,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
	.set_tool_table_location = smu_v14_0_set_tool_table_location,
	.deep_sleep_control = smu_v14_0_deep_sleep_control,
	.gfx_ulv_control = smu_v14_0_gfx_ulv_control,
	/* BACO / reset handling */
	.get_bamaco_support = smu_v14_0_get_bamaco_support,
	.baco_get_state = smu_v14_0_baco_get_state,
	.baco_set_state = smu_v14_0_baco_set_state,
	.baco_enter = smu_v14_0_2_baco_enter,
	.baco_exit = smu_v14_0_2_baco_exit,
	.mode1_reset_is_support = smu_v14_0_2_is_mode1_reset_supported,
	.mode1_reset = smu_v14_0_2_mode1_reset,
	.mode2_reset = smu_v14_0_2_mode2_reset,
	.enable_gfx_features = smu_v14_0_2_enable_gfx_features,
	.set_mp1_state = smu_v14_0_2_set_mp1_state,
	.set_df_cstate = smu_v14_0_2_set_df_cstate,
#if 0
	.gpo_control = smu_v14_0_gpo_control,
#endif
};
2844
/*
 * Wire the SMU v14.0.2 dispatch table and message/clock/feature/table
 * lookup maps into the SMU context, then initialize the driver<->PMFW
 * mailbox registers. Called once during SMU early init for this ASIC.
 */
void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &smu_v14_0_2_ppt_funcs;
	smu->message_map = smu_v14_0_2_message_map;
	smu->clock_map = smu_v14_0_2_clk_map;
	smu->feature_map = smu_v14_0_2_feature_mask_map;
	smu->table_map = smu_v14_0_2_table_map;
	smu->pwr_src_map = smu_v14_0_2_pwr_src_map;
	smu->workload_map = smu_v14_0_2_workload_map;
	smu_v14_0_2_set_smu_mailbox_registers(smu);
}
2856