1 /*
2 * Copyright 2021 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #define SWSMU_CODE_LAYER_L2
25
26 #include <linux/firmware.h>
27 #include <linux/pci.h>
28 #include <linux/i2c.h>
29 #include "amdgpu.h"
30 #include "amdgpu_smu.h"
31 #include "atomfirmware.h"
32 #include "amdgpu_atomfirmware.h"
33 #include "amdgpu_atombios.h"
34 #include "smu_v13_0.h"
35 #include "smu13_driver_if_v13_0_7.h"
36 #include "soc15_common.h"
37 #include "atom.h"
38 #include "smu_v13_0_7_ppt.h"
39 #include "smu_v13_0_7_pptable.h"
40 #include "smu_v13_0_7_ppsmc.h"
41 #include "nbio/nbio_4_3_0_offset.h"
42 #include "nbio/nbio_4_3_0_sh_mask.h"
43 #include "mp/mp_13_0_0_offset.h"
44 #include "mp/mp_13_0_0_sh_mask.h"
45
46 #include "asic_reg/mp/mp_13_0_0_sh_mask.h"
47 #include "smu_cmn.h"
48 #include "amdgpu_ras.h"
49
50 /*
51 * DO NOT use these for err/warn/info/debug messages.
52 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
53 * They are more MGPU friendly.
54 */
55 #undef pr_err
56 #undef pr_warn
57 #undef pr_info
58 #undef pr_debug
59
60 #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
61
62 #define FEATURE_MASK(feature) (1ULL << feature)
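/* Enabling any of these DPM features counts as "DPM running" (see smu_v13_0_7_is_dpm_running). */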
63 #define SMC_DPM_FEATURE ( \
64 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
65 FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
66 FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
67 FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
68 FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \
69 FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT))
70
71 #define smnMP1_FIRMWARE_FLAGS_SMU_13_0_7 0x3b10028
72
73 #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000
74
75 #define PP_OD_FEATURE_GFXCLK_FMIN 0
76 #define PP_OD_FEATURE_GFXCLK_FMAX 1
77 #define PP_OD_FEATURE_UCLK_FMIN 2
78 #define PP_OD_FEATURE_UCLK_FMAX 3
79 #define PP_OD_FEATURE_GFX_VF_CURVE 4
80 #define PP_OD_FEATURE_FAN_CURVE_TEMP 5
81 #define PP_OD_FEATURE_FAN_CURVE_PWM 6
82 #define PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT 7
83 #define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8
84 #define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9
85 #define PP_OD_FEATURE_FAN_MINIMUM_PWM 10
86 #define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11
87 #define PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP 12
88
89 #define LINK_SPEED_MAX 3
90
91 static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] = {
92 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
93 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
94 MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
95 MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0),
96 MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0),
97 MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0),
98 MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0),
99 MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1),
100 MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1),
101 MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1),
102 MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1),
103 MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1),
104 MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1),
105 MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1),
106 MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
107 MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
108 MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
109 MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
110 MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
111 MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1),
112 MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
113 MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0),
114 MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0),
115 MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0),
116 MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0),
117 MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1),
118 MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
119 MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1),
120 MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0),
121 MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
122 MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
123 MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
124 MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0),
125 MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
126 MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0),
127 MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0),
128 MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1),
129 MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0),
130 MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
131 MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
132 MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
133 MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
134 MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
135 MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0),
136 MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
137 MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
138 MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
139 MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
140 MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
141 MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
142 MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
143 MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
144 MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
145 MSG_MAP(EnableUCLKShadow, PPSMC_MSG_EnableUCLKShadow, 0),
146 };
147
148 static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
149 CLK_MAP(GFXCLK, PPCLK_GFXCLK),
150 CLK_MAP(SCLK, PPCLK_GFXCLK),
151 CLK_MAP(SOCCLK, PPCLK_SOCCLK),
152 CLK_MAP(FCLK, PPCLK_FCLK),
153 CLK_MAP(UCLK, PPCLK_UCLK),
154 CLK_MAP(MCLK, PPCLK_UCLK),
155 CLK_MAP(VCLK, PPCLK_VCLK_0),
156 CLK_MAP(VCLK1, PPCLK_VCLK_1),
157 CLK_MAP(DCLK, PPCLK_DCLK_0),
158 CLK_MAP(DCLK1, PPCLK_DCLK_1),
159 CLK_MAP(DCEFCLK, PPCLK_DCFCLK),
160 };
161
162 static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] = {
163 FEA_MAP(FW_DATA_READ),
164 FEA_MAP(DPM_GFXCLK),
165 FEA_MAP(DPM_GFX_POWER_OPTIMIZER),
166 FEA_MAP(DPM_UCLK),
167 FEA_MAP(DPM_FCLK),
168 FEA_MAP(DPM_SOCCLK),
169 FEA_MAP(DPM_MP0CLK),
170 FEA_MAP(DPM_LINK),
171 FEA_MAP(DPM_DCN),
172 FEA_MAP(VMEMP_SCALING),
173 FEA_MAP(VDDIO_MEM_SCALING),
174 FEA_MAP(DS_GFXCLK),
175 FEA_MAP(DS_SOCCLK),
176 FEA_MAP(DS_FCLK),
177 FEA_MAP(DS_LCLK),
178 FEA_MAP(DS_DCFCLK),
179 FEA_MAP(DS_UCLK),
180 FEA_MAP(GFX_ULV),
181 FEA_MAP(FW_DSTATE),
182 FEA_MAP(GFXOFF),
183 FEA_MAP(BACO),
184 FEA_MAP(MM_DPM),
185 FEA_MAP(SOC_MPCLK_DS),
186 FEA_MAP(BACO_MPCLK_DS),
187 FEA_MAP(THROTTLERS),
188 FEA_MAP(SMARTSHIFT),
189 FEA_MAP(GTHR),
190 FEA_MAP(ACDC),
191 FEA_MAP(VR0HOT),
192 FEA_MAP(FW_CTF),
193 FEA_MAP(FAN_CONTROL),
194 FEA_MAP(GFX_DCS),
195 FEA_MAP(GFX_READ_MARGIN),
196 FEA_MAP(LED_DISPLAY),
197 FEA_MAP(GFXCLK_SPREAD_SPECTRUM),
198 FEA_MAP(OUT_OF_BAND_MONITOR),
199 FEA_MAP(OPTIMIZED_VMIN),
200 FEA_MAP(GFX_IMU),
201 FEA_MAP(BOOT_TIME_CAL),
202 FEA_MAP(GFX_PCC_DFLL),
203 FEA_MAP(SOC_CG),
204 FEA_MAP(DF_CSTATE),
205 FEA_MAP(GFX_EDC),
206 FEA_MAP(BOOT_POWER_OPT),
207 FEA_MAP(CLOCK_POWER_DOWN_BYPASS),
208 FEA_MAP(DS_VCN),
209 FEA_MAP(BACO_CG),
210 FEA_MAP(MEM_TEMP_READ),
211 FEA_MAP(ATHUB_MMHUB_PG),
212 FEA_MAP(SOC_PCC),
213 [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
214 [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
215 [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
216 };
217
218 static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
219 TAB_MAP(PPTABLE),
220 TAB_MAP(WATERMARKS),
221 TAB_MAP(AVFS_PSM_DEBUG),
222 TAB_MAP(PMSTATUSLOG),
223 TAB_MAP(SMU_METRICS),
224 TAB_MAP(DRIVER_SMU_CONFIG),
225 TAB_MAP(ACTIVITY_MONITOR_COEFF),
226 [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
227 TAB_MAP(OVERDRIVE),
228 TAB_MAP(WIFIBAND),
229 };
230
231 static struct cmn2asic_mapping smu_v13_0_7_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
232 PWR_MAP(AC),
233 PWR_MAP(DC),
234 };
235
236 static struct cmn2asic_mapping smu_v13_0_7_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
237 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT),
238 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
239 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
240 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
241 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
242 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
243 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
244 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT),
245 };
246
247 static const uint8_t smu_v13_0_7_throttler_map[] = {
248 [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT),
249 [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT),
250 [THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT),
251 [THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT),
252 [THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT),
253 [THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT),
254 [THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT),
255 [THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
256 [THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
257 [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
258 [THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT),
259 [THROTTLER_TEMP_VR_MEM0_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
260 [THROTTLER_TEMP_VR_MEM1_BIT] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT),
261 [THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT),
262 [THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT),
263 [THROTTLER_GFX_APCC_PLUS_BIT] = (SMU_THROTTLER_APCC_BIT),
264 [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT),
265 };
266
267 static int
268 smu_v13_0_7_get_allowed_feature_mask(struct smu_context *smu,
269 uint32_t *feature_mask, uint32_t num)
270 {
271 struct amdgpu_device *adev = smu->adev;
272
273 if (num > 2)
274 return -EINVAL;
275
276 memset(feature_mask, 0, sizeof(uint32_t) * num);
277
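/* The casts below treat the two 32-bit dwords of feature_mask as a single 64-bit mask. */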
278 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DATA_READ_BIT);
279
280 if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) {
281 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
282 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT);
283 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT);
284 }
285
286 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
287 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
288
289 if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) {
290 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
291 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_FCLK_BIT);
292 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
293 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
294 }
295
296 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
297
298 if (adev->pm.pp_feature & PP_PCIE_DPM_MASK)
299 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
300
301 if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
302 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
303
304 if (adev->pm.pp_feature & PP_ULV_MASK)
305 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
306
307 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT);
308 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT);
309 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_BIT);
310 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_VCN_BIT);
311 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_FCLK_BIT);
312 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DF_CSTATE_BIT);
313 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_THROTTLERS_BIT);
314 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VR0HOT_BIT);
315 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_CTF_BIT);
316 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FAN_CONTROL_BIT);
317 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
318 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXCLK_SPREAD_SPECTRUM_BIT);
319 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MEM_TEMP_READ_BIT);
320 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
321 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_MPCLK_DS_BIT);
322 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_MPCLK_DS_BIT);
323 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_PCC_DFLL_BIT);
324 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT);
325 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_BIT);
326
327 if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
328 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCN_BIT);
329
330 if ((adev->pg_flags & AMD_PG_SUPPORT_ATHUB) &&
331 (adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
332 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
333
334 return 0;
335 }
336
337 static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
338 {
339 struct smu_table_context *table_context = &smu->smu_table;
340 struct smu_13_0_7_powerplay_table *powerplay_table =
341 table_context->power_play_table;
342 struct smu_baco_context *smu_baco = &smu->smu_baco;
343 PPTable_t *smc_pptable = table_context->driver_pptable;
344 BoardTable_t *BoardTable = &smc_pptable->BoardTable;
345 const OverDriveLimits_t * const overdrive_upperlimits =
346 &smc_pptable->SkuTable.OverDriveLimitsBasicMax;
347 const OverDriveLimits_t * const overdrive_lowerlimits =
348 &smc_pptable->SkuTable.OverDriveLimitsMin;
349
350 if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
351 smu->dc_controlled_by_gpio = true;
352
353 if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO) {
354 smu_baco->platform_support = true;
355
356 if ((powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
357 && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
358 smu_baco->maco_support = true;
359 }
360
361 if (!overdrive_lowerlimits->FeatureCtrlMask ||
362 !overdrive_upperlimits->FeatureCtrlMask)
363 smu->od_enabled = false;
364
365 table_context->thermal_controller_type =
366 powerplay_table->thermal_controller_type;
367
368 /*
369 * Instead of allocating its own buffer and copying the overdrive_table,
370 * smu->od_settings just points to the actual overdrive_table.
371 */
372 smu->od_settings = &powerplay_table->overdrive_table;
373
374 return 0;
375 }
376
377 static int smu_v13_0_7_store_powerplay_table(struct smu_context *smu)
378 {
379 struct smu_table_context *table_context = &smu->smu_table;
380 struct smu_13_0_7_powerplay_table *powerplay_table =
381 table_context->power_play_table;
382 struct amdgpu_device *adev = smu->adev;
383
384 if (adev->pdev->device == 0x51)
385 powerplay_table->smc_pptable.SkuTable.DebugOverrides |= 0x00000080;
386
387 memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
388 sizeof(PPTable_t));
389
390 return 0;
391 }
392
393 static int smu_v13_0_7_check_fw_status(struct smu_context *smu)
394 {
395 struct amdgpu_device *adev = smu->adev;
396 uint32_t mp1_fw_flags;
397
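/* Read the MP1 firmware flags through the PCIe aperture; the firmware only counts as ready once it reports interrupts enabled. */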
398 mp1_fw_flags = RREG32_PCIE(MP1_Public |
399 (smnMP1_FIRMWARE_FLAGS_SMU_13_0_7 & 0xffffffff));
400
401 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
402 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
403 return 0;
404
405 return -EIO;
406 }
407
408 #ifndef atom_smc_dpm_info_table_13_0_7
409 struct atom_smc_dpm_info_table_13_0_7 {
410 struct atom_common_table_header table_header;
411 BoardTable_t BoardTable;
412 };
413 #endif
414
415 static int smu_v13_0_7_append_powerplay_table(struct smu_context *smu)
416 {
417 struct smu_table_context *table_context = &smu->smu_table;
418
419 PPTable_t *smc_pptable = table_context->driver_pptable;
420
421 struct atom_smc_dpm_info_table_13_0_7 *smc_dpm_table;
422
423 BoardTable_t *BoardTable = &smc_pptable->BoardTable;
424
425 int index, ret;
426
427 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
428 smc_dpm_info);
429
430 ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
431 (uint8_t **)&smc_dpm_table);
432 if (ret)
433 return ret;
434
435 memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t));
436
437 return 0;
438 }
439
440 static int smu_v13_0_7_get_pptable_from_pmfw(struct smu_context *smu,
441 void **table,
442 uint32_t *size)
443 {
444 struct smu_table_context *smu_table = &smu->smu_table;
445 void *combo_pptable = smu_table->combo_pptable;
446 int ret = 0;
447
448 ret = smu_cmn_get_combo_pptable(smu);
449 if (ret)
450 return ret;
451
452 *table = combo_pptable;
453 *size = sizeof(struct smu_13_0_7_powerplay_table);
454
455 return 0;
456 }
457
458 static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
459 {
460 struct smu_table_context *smu_table = &smu->smu_table;
461 struct amdgpu_device *adev = smu->adev;
462 int ret = 0;
463
464 /*
465 * With SCPM enabled, the pptable used will be signed. It cannot
466 * be used directly by the driver. To get the raw pptable, we need to
467 * rely on the combo pptable (and its relevant SMU message).
468 */
469 ret = smu_v13_0_7_get_pptable_from_pmfw(smu,
470 &smu_table->power_play_table,
471 &smu_table->power_play_table_size);
472 if (ret)
473 return ret;
474
475 ret = smu_v13_0_7_store_powerplay_table(smu);
476 if (ret)
477 return ret;
478
479 /*
480 * With SCPM enabled, the operation below will be handled
481 * by PSP. Driver involvement is unnecessary.
482 */
483 if (!adev->scpm_enabled) {
484 ret = smu_v13_0_7_append_powerplay_table(smu);
485 if (ret)
486 return ret;
487 }
488
489 ret = smu_v13_0_7_check_powerplay_table(smu);
490 if (ret)
491 return ret;
492
493 return ret;
494 }
495
496 static int smu_v13_0_7_tables_init(struct smu_context *smu)
497 {
498 struct smu_table_context *smu_table = &smu->smu_table;
499 struct smu_table *tables = smu_table->tables;
500
501 SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
502 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
503
504 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
505 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
506 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t),
507 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
508 SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
509 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
510 SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTableExternal_t),
511 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
512 SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
513 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
514 SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
515 sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
516 AMDGPU_GEM_DOMAIN_VRAM);
517 SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
518 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
519 SMU_TABLE_INIT(tables, SMU_TABLE_WIFIBAND,
520 sizeof(WifiBandEntryTable_t), PAGE_SIZE,
521 AMDGPU_GEM_DOMAIN_VRAM);
522
523 smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
524 if (!smu_table->metrics_table)
525 goto err0_out;
526 smu_table->metrics_time = 0;
527
528 smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
529 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
530 if (!smu_table->gpu_metrics_table)
531 goto err1_out;
532
533 smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
534 if (!smu_table->watermarks_table)
535 goto err2_out;
536
537 return 0;
538
539 err2_out:
540 kfree(smu_table->gpu_metrics_table);
541 err1_out:
542 kfree(smu_table->metrics_table);
543 err0_out:
544 return -ENOMEM;
545 }
546
547 static int smu_v13_0_7_allocate_dpm_context(struct smu_context *smu)
548 {
549 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
550
551 smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
552 GFP_KERNEL);
553 if (!smu_dpm->dpm_context)
554 return -ENOMEM;
555
556 smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);
557
558 return 0;
559 }
560
561 static int smu_v13_0_7_init_smc_tables(struct smu_context *smu)
562 {
563 int ret = 0;
564
565 ret = smu_v13_0_7_tables_init(smu);
566 if (ret)
567 return ret;
568
569 ret = smu_v13_0_7_allocate_dpm_context(smu);
570 if (ret)
571 return ret;
572
573 return smu_v13_0_init_smc_tables(smu);
574 }
575
576 static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
577 {
578 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
579 PPTable_t *driver_ppt = smu->smu_table.driver_pptable;
580 SkuTable_t *skutable = &driver_ppt->SkuTable;
581 struct smu_13_0_dpm_table *dpm_table;
582 struct smu_13_0_pcie_table *pcie_table;
583 uint32_t link_level;
584 int ret = 0;
585
586 /* socclk dpm table setup */
587 dpm_table = &dpm_context->dpm_tables.soc_table;
588 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
589 ret = smu_v13_0_set_single_dpm_table(smu,
590 SMU_SOCCLK,
591 dpm_table);
592 if (ret)
593 return ret;
594 } else {
595 dpm_table->count = 1;
596 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
597 dpm_table->dpm_levels[0].enabled = true;
598 dpm_table->min = dpm_table->dpm_levels[0].value;
599 dpm_table->max = dpm_table->dpm_levels[0].value;
600 }
601
602 /* gfxclk dpm table setup */
603 dpm_table = &dpm_context->dpm_tables.gfx_table;
604 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
605 ret = smu_v13_0_set_single_dpm_table(smu,
606 SMU_GFXCLK,
607 dpm_table);
608 if (ret)
609 return ret;
610
611 if (skutable->DriverReportedClocks.GameClockAc &&
612 (dpm_table->dpm_levels[dpm_table->count - 1].value >
613 skutable->DriverReportedClocks.GameClockAc)) {
614 dpm_table->dpm_levels[dpm_table->count - 1].value =
615 skutable->DriverReportedClocks.GameClockAc;
616 dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
617 }
618 } else {
619 dpm_table->count = 1;
620 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
621 dpm_table->dpm_levels[0].enabled = true;
622 dpm_table->min = dpm_table->dpm_levels[0].value;
623 dpm_table->max = dpm_table->dpm_levels[0].value;
624 }
625
626 /* uclk dpm table setup */
627 dpm_table = &dpm_context->dpm_tables.uclk_table;
628 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
629 ret = smu_v13_0_set_single_dpm_table(smu,
630 SMU_UCLK,
631 dpm_table);
632 if (ret)
633 return ret;
634 } else {
635 dpm_table->count = 1;
636 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
637 dpm_table->dpm_levels[0].enabled = true;
638 dpm_table->min = dpm_table->dpm_levels[0].value;
639 dpm_table->max = dpm_table->dpm_levels[0].value;
640 }
641
642 /* fclk dpm table setup */
643 dpm_table = &dpm_context->dpm_tables.fclk_table;
644 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
645 ret = smu_v13_0_set_single_dpm_table(smu,
646 SMU_FCLK,
647 dpm_table);
648 if (ret)
649 return ret;
650 } else {
651 dpm_table->count = 1;
652 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
653 dpm_table->dpm_levels[0].enabled = true;
654 dpm_table->min = dpm_table->dpm_levels[0].value;
655 dpm_table->max = dpm_table->dpm_levels[0].value;
656 }
657
658 /* vclk dpm table setup */
659 dpm_table = &dpm_context->dpm_tables.vclk_table;
660 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) {
661 ret = smu_v13_0_set_single_dpm_table(smu,
662 SMU_VCLK,
663 dpm_table);
664 if (ret)
665 return ret;
666 } else {
667 dpm_table->count = 1;
668 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
669 dpm_table->dpm_levels[0].enabled = true;
670 dpm_table->min = dpm_table->dpm_levels[0].value;
671 dpm_table->max = dpm_table->dpm_levels[0].value;
672 }
673
674 /* dclk dpm table setup */
675 dpm_table = &dpm_context->dpm_tables.dclk_table;
676 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) {
677 ret = smu_v13_0_set_single_dpm_table(smu,
678 SMU_DCLK,
679 dpm_table);
680 if (ret)
681 return ret;
682 } else {
683 dpm_table->count = 1;
684 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
685 dpm_table->dpm_levels[0].enabled = true;
686 dpm_table->min = dpm_table->dpm_levels[0].value;
687 dpm_table->max = dpm_table->dpm_levels[0].value;
688 }
689
690 /* lclk dpm table setup */
691 pcie_table = &dpm_context->dpm_tables.pcie_table;
692 pcie_table->num_of_link_levels = 0;
693 for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
694 if (!skutable->PcieGenSpeed[link_level] &&
695 !skutable->PcieLaneCount[link_level] &&
696 !skutable->LclkFreq[link_level])
697 continue;
698
699 pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
700 skutable->PcieGenSpeed[link_level];
701 pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
702 skutable->PcieLaneCount[link_level];
703 pcie_table->clk_freq[pcie_table->num_of_link_levels] =
704 skutable->LclkFreq[link_level];
705 pcie_table->num_of_link_levels++;
706 }
707
708 /* dcefclk dpm table setup */
709 dpm_table = &dpm_context->dpm_tables.dcef_table;
710 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
711 ret = smu_v13_0_set_single_dpm_table(smu,
712 SMU_DCEFCLK,
713 dpm_table);
714 if (ret)
715 return ret;
716 } else {
717 dpm_table->count = 1;
718 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
719 dpm_table->dpm_levels[0].enabled = true;
720 dpm_table->min = dpm_table->dpm_levels[0].value;
721 dpm_table->max = dpm_table->dpm_levels[0].value;
722 }
723
724 return 0;
725 }
726
727 static bool smu_v13_0_7_is_dpm_running(struct smu_context *smu)
728 {
729 int ret = 0;
730 uint64_t feature_enabled;
731
732 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
733 if (ret)
734 return false;
735
736 return !!(feature_enabled & SMC_DPM_FEATURE);
737 }
738
739 static uint32_t smu_v13_0_7_get_throttler_status(SmuMetrics_t *metrics)
740 {
741 uint32_t throttler_status = 0;
742 int i;
743
744 for (i = 0; i < THROTTLER_COUNT; i++)
745 throttler_status |=
746 (metrics->ThrottlingPercentage[i] ? 1U << i : 0);
747
748 return throttler_status;
749 }
750
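/* Activity percentage at or below which the post-deep-sleep average frequency is reported instead of the pre-deep-sleep value. */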
751 #define SMU_13_0_7_BUSY_THRESHOLD 15
752 static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
753 MetricsMember_t member,
754 uint32_t *value)
755 {
756 struct smu_table_context *smu_table = &smu->smu_table;
757 SmuMetrics_t *metrics =
758 &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
759 int ret = 0;
760
761 ret = smu_cmn_get_metrics_table(smu,
762 NULL,
763 false);
764 if (ret)
765 return ret;
766
767 switch (member) {
768 case METRICS_CURR_GFXCLK:
769 *value = metrics->CurrClock[PPCLK_GFXCLK];
770 break;
771 case METRICS_CURR_SOCCLK:
772 *value = metrics->CurrClock[PPCLK_SOCCLK];
773 break;
774 case METRICS_CURR_UCLK:
775 *value = metrics->CurrClock[PPCLK_UCLK];
776 break;
777 case METRICS_CURR_VCLK:
778 *value = metrics->CurrClock[PPCLK_VCLK_0];
779 break;
780 case METRICS_CURR_VCLK1:
781 *value = metrics->CurrClock[PPCLK_VCLK_1];
782 break;
783 case METRICS_CURR_DCLK:
784 *value = metrics->CurrClock[PPCLK_DCLK_0];
785 break;
786 case METRICS_CURR_DCLK1:
787 *value = metrics->CurrClock[PPCLK_DCLK_1];
788 break;
789 case METRICS_CURR_FCLK:
790 *value = metrics->CurrClock[PPCLK_FCLK];
791 break;
792 case METRICS_CURR_DCEFCLK:
793 *value = metrics->CurrClock[PPCLK_DCFCLK];
794 break;
795 case METRICS_AVERAGE_GFXCLK:
796 *value = metrics->AverageGfxclkFrequencyPreDs;
797 break;
798 case METRICS_AVERAGE_FCLK:
799 if (metrics->AverageUclkActivity <= SMU_13_0_7_BUSY_THRESHOLD)
800 *value = metrics->AverageFclkFrequencyPostDs;
801 else
802 *value = metrics->AverageFclkFrequencyPreDs;
803 break;
804 case METRICS_AVERAGE_UCLK:
805 if (metrics->AverageUclkActivity <= SMU_13_0_7_BUSY_THRESHOLD)
806 *value = metrics->AverageMemclkFrequencyPostDs;
807 else
808 *value = metrics->AverageMemclkFrequencyPreDs;
809 break;
810 case METRICS_AVERAGE_VCLK:
811 *value = metrics->AverageVclk0Frequency;
812 break;
813 case METRICS_AVERAGE_DCLK:
814 *value = metrics->AverageDclk0Frequency;
815 break;
816 case METRICS_AVERAGE_VCLK1:
817 *value = metrics->AverageVclk1Frequency;
818 break;
819 case METRICS_AVERAGE_DCLK1:
820 *value = metrics->AverageDclk1Frequency;
821 break;
822 case METRICS_AVERAGE_GFXACTIVITY:
823 *value = metrics->AverageGfxActivity;
824 break;
825 case METRICS_AVERAGE_MEMACTIVITY:
826 *value = metrics->AverageUclkActivity;
827 break;
828 case METRICS_AVERAGE_SOCKETPOWER:
829 *value = metrics->AverageSocketPower << 8;
830 break;
831 case METRICS_TEMPERATURE_EDGE:
832 *value = metrics->AvgTemperature[TEMP_EDGE] *
833 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
834 break;
835 case METRICS_TEMPERATURE_HOTSPOT:
836 *value = metrics->AvgTemperature[TEMP_HOTSPOT] *
837 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
838 break;
839 case METRICS_TEMPERATURE_MEM:
840 *value = metrics->AvgTemperature[TEMP_MEM] *
841 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
842 break;
843 case METRICS_TEMPERATURE_VRGFX:
844 *value = metrics->AvgTemperature[TEMP_VR_GFX] *
845 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
846 break;
847 case METRICS_TEMPERATURE_VRSOC:
848 *value = metrics->AvgTemperature[TEMP_VR_SOC] *
849 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
850 break;
851 case METRICS_THROTTLER_STATUS:
852 *value = smu_v13_0_7_get_throttler_status(metrics);
853 break;
854 case METRICS_CURR_FANSPEED:
855 *value = metrics->AvgFanRpm;
856 break;
857 case METRICS_CURR_FANPWM:
858 *value = metrics->AvgFanPwm;
859 break;
860 case METRICS_VOLTAGE_VDDGFX:
861 *value = metrics->AvgVoltage[SVI_PLANE_GFX];
862 break;
863 case METRICS_PCIE_RATE:
864 *value = metrics->PcieRate;
865 break;
866 case METRICS_PCIE_WIDTH:
867 *value = metrics->PcieWidth;
868 break;
869 default:
870 *value = UINT_MAX;
871 break;
872 }
873
874 return ret;
875 }
876
877 static int smu_v13_0_7_get_dpm_ultimate_freq(struct smu_context *smu,
878 enum smu_clk_type clk_type,
879 uint32_t *min,
880 uint32_t *max)
881 {
882 struct smu_13_0_dpm_context *dpm_context =
883 smu->smu_dpm.dpm_context;
884 struct smu_13_0_dpm_table *dpm_table;
885
886 switch (clk_type) {
887 case SMU_MCLK:
888 case SMU_UCLK:
889 /* uclk dpm table */
890 dpm_table = &dpm_context->dpm_tables.uclk_table;
891 break;
892 case SMU_GFXCLK:
893 case SMU_SCLK:
894 /* gfxclk dpm table */
895 dpm_table = &dpm_context->dpm_tables.gfx_table;
896 break;
897 case SMU_SOCCLK:
898 /* socclk dpm table */
899 dpm_table = &dpm_context->dpm_tables.soc_table;
900 break;
901 case SMU_FCLK:
902 /* fclk dpm table */
903 dpm_table = &dpm_context->dpm_tables.fclk_table;
904 break;
905 case SMU_VCLK:
906 case SMU_VCLK1:
907 /* vclk dpm table */
908 dpm_table = &dpm_context->dpm_tables.vclk_table;
909 break;
910 case SMU_DCLK:
911 case SMU_DCLK1:
912 /* dclk dpm table */
913 dpm_table = &dpm_context->dpm_tables.dclk_table;
914 break;
915 default:
916 dev_err(smu->adev->dev, "Unsupported clock type!\n");
917 return -EINVAL;
918 }
919
920 if (min)
921 *min = dpm_table->min;
922 if (max)
923 *max = dpm_table->max;
924
925 return 0;
926 }
927
928 static int smu_v13_0_7_read_sensor(struct smu_context *smu,
929 enum amd_pp_sensors sensor,
930 void *data,
931 uint32_t *size)
932 {
933 struct smu_table_context *table_context = &smu->smu_table;
934 PPTable_t *smc_pptable = table_context->driver_pptable;
935 int ret = 0;
936
937 switch (sensor) {
938 case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
939 *(uint16_t *)data = smc_pptable->SkuTable.FanMaximumRpm;
940 *size = 4;
941 break;
942 case AMDGPU_PP_SENSOR_MEM_LOAD:
943 ret = smu_v13_0_7_get_smu_metrics_data(smu,
944 METRICS_AVERAGE_MEMACTIVITY,
945 (uint32_t *)data);
946 *size = 4;
947 break;
948 case AMDGPU_PP_SENSOR_GPU_LOAD:
949 ret = smu_v13_0_7_get_smu_metrics_data(smu,
950 METRICS_AVERAGE_GFXACTIVITY,
951 (uint32_t *)data);
952 *size = 4;
953 break;
954 case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
955 ret = smu_v13_0_7_get_smu_metrics_data(smu,
956 METRICS_AVERAGE_SOCKETPOWER,
957 (uint32_t *)data);
958 *size = 4;
959 break;
960 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
961 ret = smu_v13_0_7_get_smu_metrics_data(smu,
962 METRICS_TEMPERATURE_HOTSPOT,
963 (uint32_t *)data);
964 *size = 4;
965 break;
966 case AMDGPU_PP_SENSOR_EDGE_TEMP:
967 ret = smu_v13_0_7_get_smu_metrics_data(smu,
968 METRICS_TEMPERATURE_EDGE,
969 (uint32_t *)data);
970 *size = 4;
971 break;
972 case AMDGPU_PP_SENSOR_MEM_TEMP:
973 ret = smu_v13_0_7_get_smu_metrics_data(smu,
974 METRICS_TEMPERATURE_MEM,
975 (uint32_t *)data);
976 *size = 4;
977 break;
978 case AMDGPU_PP_SENSOR_GFX_MCLK:
979 ret = smu_v13_0_7_get_smu_metrics_data(smu,
980 METRICS_CURR_UCLK,
981 (uint32_t *)data);
982 *(uint32_t *)data *= 100;
983 *size = 4;
984 break;
985 case AMDGPU_PP_SENSOR_GFX_SCLK:
986 ret = smu_v13_0_7_get_smu_metrics_data(smu,
987 METRICS_AVERAGE_GFXCLK,
988 (uint32_t *)data);
989 *(uint32_t *)data *= 100;
990 *size = 4;
991 break;
992 case AMDGPU_PP_SENSOR_VDDGFX:
993 ret = smu_v13_0_7_get_smu_metrics_data(smu,
994 METRICS_VOLTAGE_VDDGFX,
995 (uint32_t *)data);
996 *size = 4;
997 break;
998 case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
999 default:
1000 ret = -EOPNOTSUPP;
1001 break;
1002 }
1003
1004 return ret;
1005 }
1006
1007 static int smu_v13_0_7_get_current_clk_freq_by_table(struct smu_context *smu,
1008 enum smu_clk_type clk_type,
1009 uint32_t *value)
1010 {
1011 MetricsMember_t member_type;
1012 int clk_id = 0;
1013
1014 clk_id = smu_cmn_to_asic_specific_index(smu,
1015 CMN2ASIC_MAPPING_CLK,
1016 clk_type);
1017 if (clk_id < 0)
1018 return -EINVAL;
1019
1020 switch (clk_id) {
1021 case PPCLK_GFXCLK:
1022 member_type = METRICS_AVERAGE_GFXCLK;
1023 break;
1024 case PPCLK_UCLK:
1025 member_type = METRICS_CURR_UCLK;
1026 break;
1027 case PPCLK_FCLK:
1028 member_type = METRICS_CURR_FCLK;
1029 break;
1030 case PPCLK_SOCCLK:
1031 member_type = METRICS_CURR_SOCCLK;
1032 break;
1033 case PPCLK_VCLK_0:
1034 member_type = METRICS_CURR_VCLK;
1035 break;
1036 case PPCLK_DCLK_0:
1037 member_type = METRICS_CURR_DCLK;
1038 break;
1039 case PPCLK_VCLK_1:
1040 member_type = METRICS_CURR_VCLK1;
1041 break;
1042 case PPCLK_DCLK_1:
1043 member_type = METRICS_CURR_DCLK1;
1044 break;
1045 case PPCLK_DCFCLK:
1046 member_type = METRICS_CURR_DCEFCLK;
1047 break;
1048 default:
1049 return -EINVAL;
1050 }
1051
1052 return smu_v13_0_7_get_smu_metrics_data(smu,
1053 member_type,
1054 value);
1055 }
1056
1057 static bool smu_v13_0_7_is_od_feature_supported(struct smu_context *smu,
1058 int od_feature_bit)
1059 {
1060 PPTable_t *pptable = smu->smu_table.driver_pptable;
1061 const OverDriveLimits_t * const overdrive_upperlimits =
1062 &pptable->SkuTable.OverDriveLimitsBasicMax;
1063
1064 return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
1065 }
1066
1067 static void smu_v13_0_7_get_od_setting_limits(struct smu_context *smu,
1068 int od_feature_bit,
1069 int32_t *min,
1070 int32_t *max)
1071 {
1072 PPTable_t *pptable = smu->smu_table.driver_pptable;
1073 const OverDriveLimits_t * const overdrive_upperlimits =
1074 &pptable->SkuTable.OverDriveLimitsBasicMax;
1075 const OverDriveLimits_t * const overdrive_lowerlimits =
1076 &pptable->SkuTable.OverDriveLimitsMin;
1077 int32_t od_min_setting, od_max_setting;
1078
1079 switch (od_feature_bit) {
1080 case PP_OD_FEATURE_GFXCLK_FMIN:
1081 od_min_setting = overdrive_lowerlimits->GfxclkFmin;
1082 od_max_setting = overdrive_upperlimits->GfxclkFmin;
1083 break;
1084 case PP_OD_FEATURE_GFXCLK_FMAX:
1085 od_min_setting = overdrive_lowerlimits->GfxclkFmax;
1086 od_max_setting = overdrive_upperlimits->GfxclkFmax;
1087 break;
1088 case PP_OD_FEATURE_UCLK_FMIN:
1089 od_min_setting = overdrive_lowerlimits->UclkFmin;
1090 od_max_setting = overdrive_upperlimits->UclkFmin;
1091 break;
1092 case PP_OD_FEATURE_UCLK_FMAX:
1093 od_min_setting = overdrive_lowerlimits->UclkFmax;
1094 od_max_setting = overdrive_upperlimits->UclkFmax;
1095 break;
1096 case PP_OD_FEATURE_GFX_VF_CURVE:
1097 od_min_setting = overdrive_lowerlimits->VoltageOffsetPerZoneBoundary;
1098 od_max_setting = overdrive_upperlimits->VoltageOffsetPerZoneBoundary;
1099 break;
1100 case PP_OD_FEATURE_FAN_CURVE_TEMP:
1101 od_min_setting = overdrive_lowerlimits->FanLinearTempPoints;
1102 od_max_setting = overdrive_upperlimits->FanLinearTempPoints;
1103 break;
1104 case PP_OD_FEATURE_FAN_CURVE_PWM:
1105 od_min_setting = overdrive_lowerlimits->FanLinearPwmPoints;
1106 od_max_setting = overdrive_upperlimits->FanLinearPwmPoints;
1107 break;
1108 case PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT:
1109 od_min_setting = overdrive_lowerlimits->AcousticLimitRpmThreshold;
1110 od_max_setting = overdrive_upperlimits->AcousticLimitRpmThreshold;
1111 break;
1112 case PP_OD_FEATURE_FAN_ACOUSTIC_TARGET:
1113 od_min_setting = overdrive_lowerlimits->AcousticTargetRpmThreshold;
1114 od_max_setting = overdrive_upperlimits->AcousticTargetRpmThreshold;
1115 break;
1116 case PP_OD_FEATURE_FAN_TARGET_TEMPERATURE:
1117 od_min_setting = overdrive_lowerlimits->FanTargetTemperature;
1118 od_max_setting = overdrive_upperlimits->FanTargetTemperature;
1119 break;
1120 case PP_OD_FEATURE_FAN_MINIMUM_PWM:
1121 od_min_setting = overdrive_lowerlimits->FanMinimumPwm;
1122 od_max_setting = overdrive_upperlimits->FanMinimumPwm;
1123 break;
1124 case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE:
1125 od_min_setting = overdrive_lowerlimits->FanZeroRpmEnable;
1126 od_max_setting = overdrive_upperlimits->FanZeroRpmEnable;
1127 break;
1128 case PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP:
1129 od_min_setting = overdrive_lowerlimits->FanZeroRpmStopTemp;
1130 od_max_setting = overdrive_upperlimits->FanZeroRpmStopTemp;
1131 break;
1132 default:
1133 od_min_setting = od_max_setting = INT_MAX;
1134 break;
1135 }
1136
1137 if (min)
1138 *min = od_min_setting;
1139 if (max)
1140 *max = od_max_setting;
1141 }
1142
1143 static void smu_v13_0_7_dump_od_table(struct smu_context *smu,
1144 OverDriveTableExternal_t *od_table)
1145 {
1146 struct amdgpu_device *adev = smu->adev;
1147
1148 dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin,
1149 od_table->OverDriveTable.GfxclkFmax);
1150 dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin,
1151 od_table->OverDriveTable.UclkFmax);
1152 }
1153
1154 static int smu_v13_0_7_get_overdrive_table(struct smu_context *smu,
1155 OverDriveTableExternal_t *od_table)
1156 {
1157 int ret = 0;
1158
1159 ret = smu_cmn_update_table(smu,
1160 SMU_TABLE_OVERDRIVE,
1161 0,
1162 (void *)od_table,
1163 false);
1164 if (ret)
1165 dev_err(smu->adev->dev, "Failed to get overdrive table!\n");
1166
1167 return ret;
1168 }
1169
1170 static int smu_v13_0_7_upload_overdrive_table(struct smu_context *smu,
1171 OverDriveTableExternal_t *od_table)
1172 {
1173 int ret = 0;
1174
1175 ret = smu_cmn_update_table(smu,
1176 SMU_TABLE_OVERDRIVE,
1177 0,
1178 (void *)od_table,
1179 true);
1180 if (ret)
1181 dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
1182
1183 return ret;
1184 }
1185
1186 static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
1187 enum smu_clk_type clk_type,
1188 char *buf)
1189 {
1190 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
1191 struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
1192 OverDriveTableExternal_t *od_table =
1193 (OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
1194 struct smu_13_0_dpm_table *single_dpm_table;
1195 struct smu_13_0_pcie_table *pcie_table;
1196 uint32_t gen_speed, lane_width;
1197 int i, curr_freq, size = 0;
1198 int32_t min_value, max_value;
1199 int ret = 0;
1200
1201 smu_cmn_get_sysfs_buf(&buf, &size);
1202
1203 if (amdgpu_ras_intr_triggered()) {
1204 size += sysfs_emit_at(buf, size, "unavailable\n");
1205 return size;
1206 }
1207
1208 switch (clk_type) {
1209 case SMU_SCLK:
1210 single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
1211 break;
1212 case SMU_MCLK:
1213 single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
1214 break;
1215 case SMU_SOCCLK:
1216 single_dpm_table = &(dpm_context->dpm_tables.soc_table);
1217 break;
1218 case SMU_FCLK:
1219 single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
1220 break;
1221 case SMU_VCLK:
1222 case SMU_VCLK1:
1223 single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
1224 break;
1225 case SMU_DCLK:
1226 case SMU_DCLK1:
1227 single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
1228 break;
1229 case SMU_DCEFCLK:
1230 single_dpm_table = &(dpm_context->dpm_tables.dcef_table);
1231 break;
1232 default:
1233 break;
1234 }
1235
1236 switch (clk_type) {
1237 case SMU_SCLK:
1238 case SMU_MCLK:
1239 case SMU_SOCCLK:
1240 case SMU_FCLK:
1241 case SMU_VCLK:
1242 case SMU_VCLK1:
1243 case SMU_DCLK:
1244 case SMU_DCLK1:
1245 case SMU_DCEFCLK:
1246 ret = smu_v13_0_7_get_current_clk_freq_by_table(smu, clk_type, &curr_freq);
1247 if (ret) {
1248 dev_err(smu->adev->dev, "Failed to get current clock freq!");
1249 return ret;
1250 }
1251
1252 if (single_dpm_table->is_fine_grained) {
1253 /*
1254 * For fine grained dpms, there are only two dpm levels:
1255 * - level 0 -> min clock freq
1256 * - level 1 -> max clock freq
1257 * And the current clock frequency can be any value between them.
1258 * So, if the current clock frequency is not at level 0 or level 1,
1259 * we will fake it as three dpm levels:
1260 * - level 0 -> min clock freq
1261 * - level 1 -> current actual clock freq
1262 * - level 2 -> max clock freq
1263 */
1264 if ((single_dpm_table->dpm_levels[0].value != curr_freq) &&
1265 (single_dpm_table->dpm_levels[1].value != curr_freq)) {
1266 size += sysfs_emit_at(buf, size, "0: %uMhz\n",
1267 single_dpm_table->dpm_levels[0].value);
1268 size += sysfs_emit_at(buf, size, "1: %uMhz *\n",
1269 curr_freq);
1270 size += sysfs_emit_at(buf, size, "2: %uMhz\n",
1271 single_dpm_table->dpm_levels[1].value);
1272 } else {
1273 size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
1274 single_dpm_table->dpm_levels[0].value,
1275 single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : "");
1276 size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
1277 single_dpm_table->dpm_levels[1].value,
1278 single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : "");
1279 }
1280 } else {
1281 for (i = 0; i < single_dpm_table->count; i++)
1282 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
1283 i, single_dpm_table->dpm_levels[i].value,
1284 single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : "");
1285 }
1286 break;
1287 case SMU_PCIE:
1288 ret = smu_v13_0_7_get_smu_metrics_data(smu,
1289 METRICS_PCIE_RATE,
1290 &gen_speed);
1291 if (ret)
1292 return ret;
1293
1294 ret = smu_v13_0_7_get_smu_metrics_data(smu,
1295 METRICS_PCIE_WIDTH,
1296 &lane_width);
1297 if (ret)
1298 return ret;
1299
1300 pcie_table = &(dpm_context->dpm_tables.pcie_table);
1301 for (i = 0; i < pcie_table->num_of_link_levels; i++)
1302 size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
1303 (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," :
1304 (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," :
1305 (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," :
1306 (pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," : "",
1307 (pcie_table->pcie_lane[i] == 1) ? "x1" :
1308 (pcie_table->pcie_lane[i] == 2) ? "x2" :
1309 (pcie_table->pcie_lane[i] == 3) ? "x4" :
1310 (pcie_table->pcie_lane[i] == 4) ? "x8" :
1311 (pcie_table->pcie_lane[i] == 5) ? "x12" :
1312 (pcie_table->pcie_lane[i] == 6) ? "x16" : "",
1313 pcie_table->clk_freq[i],
1314 (gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
1315 (lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
1316 "*" : "");
1317 break;
1318
1319 case SMU_OD_SCLK:
1320 if (!smu_v13_0_7_is_od_feature_supported(smu,
1321 PP_OD_FEATURE_GFXCLK_BIT))
1322 break;
1323
1324 size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
1325 size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
1326 od_table->OverDriveTable.GfxclkFmin,
1327 od_table->OverDriveTable.GfxclkFmax);
1328 break;
1329
1330 case SMU_OD_MCLK:
1331 if (!smu_v13_0_7_is_od_feature_supported(smu,
1332 PP_OD_FEATURE_UCLK_BIT))
1333 break;
1334
1335 size += sysfs_emit_at(buf, size, "OD_MCLK:\n");
1336 size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMHz\n",
1337 od_table->OverDriveTable.UclkFmin,
1338 od_table->OverDriveTable.UclkFmax);
1339 break;
1340
1341 case SMU_OD_VDDGFX_OFFSET:
1342 if (!smu_v13_0_7_is_od_feature_supported(smu,
1343 PP_OD_FEATURE_GFX_VF_CURVE_BIT))
1344 break;
1345
1346 size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n");
1347 size += sysfs_emit_at(buf, size, "%dmV\n",
1348 od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[0]);
1349 break;
1350
1351 case SMU_OD_FAN_CURVE:
1352 if (!smu_v13_0_7_is_od_feature_supported(smu,
1353 PP_OD_FEATURE_FAN_CURVE_BIT))
1354 break;
1355
1356 size += sysfs_emit_at(buf, size, "OD_FAN_CURVE:\n");
1357 for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++)
1358 size += sysfs_emit_at(buf, size, "%d: %dC %d%%\n",
1359 i,
1360 (int)od_table->OverDriveTable.FanLinearTempPoints[i],
1361 (int)od_table->OverDriveTable.FanLinearPwmPoints[i]);
1362
1363 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
1364 smu_v13_0_7_get_od_setting_limits(smu,
1365 PP_OD_FEATURE_FAN_CURVE_TEMP,
1366 &min_value,
1367 &max_value);
1368 size += sysfs_emit_at(buf, size, "FAN_CURVE(hotspot temp): %uC %uC\n",
1369 min_value, max_value);
1370
1371 smu_v13_0_7_get_od_setting_limits(smu,
1372 PP_OD_FEATURE_FAN_CURVE_PWM,
1373 &min_value,
1374 &max_value);
1375 size += sysfs_emit_at(buf, size, "FAN_CURVE(fan speed): %u%% %u%%\n",
1376 min_value, max_value);
1377
1378 break;
1379
1380 case SMU_OD_ACOUSTIC_LIMIT:
1381 if (!smu_v13_0_7_is_od_feature_supported(smu,
1382 PP_OD_FEATURE_FAN_CURVE_BIT))
1383 break;
1384
1385 size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_LIMIT:\n");
1386 size += sysfs_emit_at(buf, size, "%d\n",
1387 (int)od_table->OverDriveTable.AcousticLimitRpmThreshold);
1388
1389 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
1390 smu_v13_0_7_get_od_setting_limits(smu,
1391 PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
1392 &min_value,
1393 &max_value);
1394 size += sysfs_emit_at(buf, size, "ACOUSTIC_LIMIT: %u %u\n",
1395 min_value, max_value);
1396 break;
1397
1398 case SMU_OD_ACOUSTIC_TARGET:
1399 if (!smu_v13_0_7_is_od_feature_supported(smu,
1400 PP_OD_FEATURE_FAN_CURVE_BIT))
1401 break;
1402
1403 size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_TARGET:\n");
1404 size += sysfs_emit_at(buf, size, "%d\n",
1405 (int)od_table->OverDriveTable.AcousticTargetRpmThreshold);
1406
1407 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
1408 smu_v13_0_7_get_od_setting_limits(smu,
1409 PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
1410 &min_value,
1411 &max_value);
1412 size += sysfs_emit_at(buf, size, "ACOUSTIC_TARGET: %u %u\n",
1413 min_value, max_value);
1414 break;
1415
1416 case SMU_OD_FAN_TARGET_TEMPERATURE:
1417 if (!smu_v13_0_7_is_od_feature_supported(smu,
1418 PP_OD_FEATURE_FAN_CURVE_BIT))
1419 break;
1420
1421 size += sysfs_emit_at(buf, size, "FAN_TARGET_TEMPERATURE:\n");
1422 size += sysfs_emit_at(buf, size, "%d\n",
1423 (int)od_table->OverDriveTable.FanTargetTemperature);
1424
1425 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
1426 smu_v13_0_7_get_od_setting_limits(smu,
1427 PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
1428 &min_value,
1429 &max_value);
1430 size += sysfs_emit_at(buf, size, "TARGET_TEMPERATURE: %u %u\n",
1431 min_value, max_value);
1432 break;
1433
1434 case SMU_OD_FAN_MINIMUM_PWM:
1435 if (!smu_v13_0_7_is_od_feature_supported(smu,
1436 PP_OD_FEATURE_FAN_CURVE_BIT))
1437 break;
1438
1439 size += sysfs_emit_at(buf, size, "FAN_MINIMUM_PWM:\n");
1440 size += sysfs_emit_at(buf, size, "%d\n",
1441 (int)od_table->OverDriveTable.FanMinimumPwm);
1442
1443 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
1444 smu_v13_0_7_get_od_setting_limits(smu,
1445 PP_OD_FEATURE_FAN_MINIMUM_PWM,
1446 &min_value,
1447 &max_value);
1448 size += sysfs_emit_at(buf, size, "MINIMUM_PWM: %u %u\n",
1449 min_value, max_value);
1450 break;
1451
1452 case SMU_OD_FAN_ZERO_RPM_ENABLE:
1453 if (!smu_v13_0_7_is_od_feature_supported(smu,
1454 PP_OD_FEATURE_ZERO_FAN_BIT))
1455 break;
1456
1457 size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n");
1458 size += sysfs_emit_at(buf, size, "%d\n",
1459 (int)od_table->OverDriveTable.FanZeroRpmEnable);
1460
1461 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
1462 smu_v13_0_7_get_od_setting_limits(smu,
1463 PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE,
1464 &min_value,
1465 &max_value);
1466 size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n",
1467 min_value, max_value);
1468 break;
1469
1470 case SMU_OD_FAN_ZERO_RPM_STOP_TEMP:
1471 if (!smu_v13_0_7_is_od_feature_supported(smu,
1472 PP_OD_FEATURE_ZERO_FAN_BIT))
1473 break;
1474
1475 size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_STOP_TEMPERATURE:\n");
1476 size += sysfs_emit_at(buf, size, "%d\n",
1477 (int)od_table->OverDriveTable.FanZeroRpmStopTemp);
1478
1479 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
1480 smu_v13_0_7_get_od_setting_limits(smu,
1481 PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP,
1482 &min_value,
1483 &max_value);
1484 size += sysfs_emit_at(buf, size, "ZERO_RPM_STOP_TEMPERATURE: %u %u\n",
1485 min_value, max_value);
1486 break;
1487
1488 case SMU_OD_RANGE:
1489 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) &&
1490 !smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) &&
1491 !smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT))
1492 break;
1493
1494 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
1495
1496 if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
1497 smu_v13_0_7_get_od_setting_limits(smu,
1498 PP_OD_FEATURE_GFXCLK_FMIN,
1499 &min_value,
1500 NULL);
1501 smu_v13_0_7_get_od_setting_limits(smu,
1502 PP_OD_FEATURE_GFXCLK_FMAX,
1503 NULL,
1504 &max_value);
1505 size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
1506 min_value, max_value);
1507 }
1508
1509 if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
1510 smu_v13_0_7_get_od_setting_limits(smu,
1511 PP_OD_FEATURE_UCLK_FMIN,
1512 &min_value,
1513 NULL);
1514 smu_v13_0_7_get_od_setting_limits(smu,
1515 PP_OD_FEATURE_UCLK_FMAX,
1516 NULL,
1517 &max_value);
1518 size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
1519 min_value, max_value);
1520 }
1521
1522 if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
1523 smu_v13_0_7_get_od_setting_limits(smu,
1524 PP_OD_FEATURE_GFX_VF_CURVE,
1525 &min_value,
1526 &max_value);
1527 size += sysfs_emit_at(buf, size, "VDDGFX_OFFSET: %7dmv %10dmv\n",
1528 min_value, max_value);
1529 }
1530 break;
1531
1532 default:
1533 break;
1534 }
1535
1536 return size;
1537 }
1538
1539 static int smu_v13_0_7_od_restore_table_single(struct smu_context *smu, long input)
1540 {
1541 struct smu_table_context *table_context = &smu->smu_table;
1542 OverDriveTableExternal_t *boot_overdrive_table =
1543 (OverDriveTableExternal_t *)table_context->boot_overdrive_table;
1544 OverDriveTableExternal_t *od_table =
1545 (OverDriveTableExternal_t *)table_context->overdrive_table;
1546 struct amdgpu_device *adev = smu->adev;
1547 int i;
1548
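/* Each case restores the boot-time default for one setting and sets the matching FeatureCtrlMask bit so the restored value is applied on the next overdrive table upload. */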
1549 switch (input) {
1550 case PP_OD_EDIT_FAN_CURVE:
1551 for (i = 0; i < NUM_OD_FAN_MAX_POINTS; i++) {
1552 od_table->OverDriveTable.FanLinearTempPoints[i] =
1553 boot_overdrive_table->OverDriveTable.FanLinearTempPoints[i];
1554 od_table->OverDriveTable.FanLinearPwmPoints[i] =
1555 boot_overdrive_table->OverDriveTable.FanLinearPwmPoints[i];
1556 }
1557 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
1558 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
1559 break;
1560 case PP_OD_EDIT_ACOUSTIC_LIMIT:
1561 od_table->OverDriveTable.AcousticLimitRpmThreshold =
1562 boot_overdrive_table->OverDriveTable.AcousticLimitRpmThreshold;
1563 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
1564 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
1565 break;
1566 case PP_OD_EDIT_ACOUSTIC_TARGET:
1567 od_table->OverDriveTable.AcousticTargetRpmThreshold =
1568 boot_overdrive_table->OverDriveTable.AcousticTargetRpmThreshold;
1569 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
1570 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
1571 break;
1572 case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
1573 od_table->OverDriveTable.FanTargetTemperature =
1574 boot_overdrive_table->OverDriveTable.FanTargetTemperature;
1575 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
1576 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
1577 break;
1578 case PP_OD_EDIT_FAN_MINIMUM_PWM:
1579 od_table->OverDriveTable.FanMinimumPwm =
1580 boot_overdrive_table->OverDriveTable.FanMinimumPwm;
1581 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
1582 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
1583 break;
1584 case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
1585 od_table->OverDriveTable.FanZeroRpmEnable =
1586 boot_overdrive_table->OverDriveTable.FanZeroRpmEnable;
1587 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
1588 break;
1589 case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP:
1590 od_table->OverDriveTable.FanZeroRpmStopTemp =
1591 boot_overdrive_table->OverDriveTable.FanZeroRpmStopTemp;
1592 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
1593 break;
1594 default:
1595 dev_info(adev->dev, "Invalid table index: %ld\n", input);
1596 return -EINVAL;
1597 }
1598
1599 return 0;
1600 }
1601
1602 static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu,
1603 enum PP_OD_DPM_TABLE_COMMAND type,
1604 long input[],
1605 uint32_t size)
1606 {
1607 struct smu_table_context *table_context = &smu->smu_table;
1608 OverDriveTableExternal_t *od_table =
1609 (OverDriveTableExternal_t *)table_context->overdrive_table;
1610 struct amdgpu_device *adev = smu->adev;
1611 uint32_t offset_of_voltageoffset;
1612 int32_t minimum, maximum;
1613 uint32_t feature_ctrlmask;
1614 int i, ret = 0;
1615
1616 switch (type) {
1617 case PP_OD_EDIT_SCLK_VDDC_TABLE:
1618 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
1619 dev_warn(adev->dev, "GFXCLK_LIMITS setting not supported!\n");
1620 return -ENOTSUPP;
1621 }
1622
1623 for (i = 0; i < size; i += 2) {
1624 if (i + 2 > size) {
1625 dev_info(adev->dev, "invalid number of input parameters %d\n", size);
1626 return -EINVAL;
1627 }
1628
1629 switch (input[i]) {
1630 case 0:
1631 smu_v13_0_7_get_od_setting_limits(smu,
1632 PP_OD_FEATURE_GFXCLK_FMIN,
1633 &minimum,
1634 &maximum);
1635 if (input[i + 1] < minimum ||
1636 input[i + 1] > maximum) {
1637 dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n",
1638 input[i + 1], minimum, maximum);
1639 return -EINVAL;
1640 }
1641
1642 od_table->OverDriveTable.GfxclkFmin = input[i + 1];
1643 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
1644 break;
1645
1646 case 1:
1647 smu_v13_0_7_get_od_setting_limits(smu,
1648 PP_OD_FEATURE_GFXCLK_FMAX,
1649 &minimum,
1650 &maximum);
1651 if (input[i + 1] < minimum ||
1652 input[i + 1] > maximum) {
1653 dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, %u]!\n",
1654 input[i + 1], minimum, maximum);
1655 return -EINVAL;
1656 }
1657
1658 od_table->OverDriveTable.GfxclkFmax = input[i + 1];
1659 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
1660 break;
1661
1662 default:
1663 dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
1664 dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
1665 return -EINVAL;
1666 }
1667 }
1668
1669 if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) {
1670 dev_err(adev->dev,
1671 "Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n",
1672 (uint32_t)od_table->OverDriveTable.GfxclkFmin,
1673 (uint32_t)od_table->OverDriveTable.GfxclkFmax);
1674 return -EINVAL;
1675 }
1676 break;
1677
1678 case PP_OD_EDIT_MCLK_VDDC_TABLE:
1679 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
1680 dev_warn(adev->dev, "UCLK_LIMITS setting not supported!\n");
1681 return -ENOTSUPP;
1682 }
1683
1684 for (i = 0; i < size; i += 2) {
1685 if (i + 2 > size) {
1686 dev_info(adev->dev, "invalid number of input parameters %d\n", size);
1687 return -EINVAL;
1688 }
1689
1690 switch (input[i]) {
1691 case 0:
1692 smu_v13_0_7_get_od_setting_limits(smu,
1693 PP_OD_FEATURE_UCLK_FMIN,
1694 &minimum,
1695 &maximum);
1696 if (input[i + 1] < minimum ||
1697 input[i + 1] > maximum) {
1698 dev_info(adev->dev, "UclkFmin (%ld) must be within [%u, %u]!\n",
1699 input[i + 1], minimum, maximum);
1700 return -EINVAL;
1701 }
1702
1703 od_table->OverDriveTable.UclkFmin = input[i + 1];
1704 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
1705 break;
1706
1707 case 1:
1708 smu_v13_0_7_get_od_setting_limits(smu,
1709 PP_OD_FEATURE_UCLK_FMAX,
1710 &minimum,
1711 &maximum);
1712 if (input[i + 1] < minimum ||
1713 input[i + 1] > maximum) {
1714 dev_info(adev->dev, "UclkFmax (%ld) must be within [%u, %u]!\n",
1715 input[i + 1], minimum, maximum);
1716 return -EINVAL;
1717 }
1718
1719 od_table->OverDriveTable.UclkFmax = input[i + 1];
1720 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
1721 break;
1722
1723 default:
1724 dev_info(adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]);
1725 dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
1726 return -EINVAL;
1727 }
1728 }
1729
1730 if (od_table->OverDriveTable.UclkFmin > od_table->OverDriveTable.UclkFmax) {
1731 dev_err(adev->dev,
1732 "Invalid setting: UclkFmin(%u) is bigger than UclkFmax(%u)\n",
1733 (uint32_t)od_table->OverDriveTable.UclkFmin,
1734 (uint32_t)od_table->OverDriveTable.UclkFmax);
1735 return -EINVAL;
1736 }
1737 break;
1738
1739 case PP_OD_EDIT_VDDGFX_OFFSET:
1740 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
1741 dev_warn(adev->dev, "Gfx offset setting not supported!\n");
1742 return -ENOTSUPP;
1743 }
1744
1745 smu_v13_0_7_get_od_setting_limits(smu,
1746 PP_OD_FEATURE_GFX_VF_CURVE,
1747 &minimum,
1748 &maximum);
1749 if (input[0] < minimum ||
1750 input[0] > maximum) {
1751 dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n",
1752 input[0], minimum, maximum);
1753 return -EINVAL;
1754 }
1755
1756 for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
1757 od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = input[0];
1758 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT);
1759 break;
1760
1761 case PP_OD_EDIT_FAN_CURVE:
1762 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
1763 dev_warn(adev->dev, "Fan curve setting not supported!\n");
1764 return -ENOTSUPP;
1765 }
1766
1767 if (input[0] >= NUM_OD_FAN_MAX_POINTS - 1 ||
1768 input[0] < 0)
1769 return -EINVAL;
1770
1771 smu_v13_0_7_get_od_setting_limits(smu,
1772 PP_OD_FEATURE_FAN_CURVE_TEMP,
1773 &minimum,
1774 &maximum);
1775 if (input[1] < minimum ||
1776 input[1] > maximum) {
1777 dev_info(adev->dev, "Fan curve temp setting(%ld) must be within [%d, %d]!\n",
1778 input[1], minimum, maximum);
1779 return -EINVAL;
1780 }
1781
1782 smu_v13_0_7_get_od_setting_limits(smu,
1783 PP_OD_FEATURE_FAN_CURVE_PWM,
1784 &minimum,
1785 &maximum);
1786 if (input[2] < minimum ||
1787 input[2] > maximum) {
1788 dev_info(adev->dev, "Fan curve pwm setting(%ld) must be within [%d, %d]!\n",
1789 input[2], minimum, maximum);
1790 return -EINVAL;
1791 }
1792
1793 od_table->OverDriveTable.FanLinearTempPoints[input[0]] = input[1];
1794 od_table->OverDriveTable.FanLinearPwmPoints[input[0]] = input[2];
1795 od_table->OverDriveTable.FanMode = FAN_MODE_MANUAL_LINEAR;
1796 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
1797 break;
1798
1799 case PP_OD_EDIT_ACOUSTIC_LIMIT:
1800 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
1801 dev_warn(adev->dev, "Fan curve setting not supported!\n");
1802 return -ENOTSUPP;
1803 }
1804
1805 smu_v13_0_7_get_od_setting_limits(smu,
1806 PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
1807 &minimum,
1808 &maximum);
1809 if (input[0] < minimum ||
1810 input[0] > maximum) {
1811 dev_info(adev->dev, "acoustic limit threshold setting(%ld) must be within [%d, %d]!\n",
1812 input[0], minimum, maximum);
1813 return -EINVAL;
1814 }
1815
1816 od_table->OverDriveTable.AcousticLimitRpmThreshold = input[0];
1817 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
1818 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
1819 break;
1820
1821 case PP_OD_EDIT_ACOUSTIC_TARGET:
1822 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
1823 dev_warn(adev->dev, "Fan curve setting not supported!\n");
1824 return -ENOTSUPP;
1825 }
1826
1827 smu_v13_0_7_get_od_setting_limits(smu,
1828 PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
1829 &minimum,
1830 &maximum);
1831 if (input[0] < minimum ||
1832 input[0] > maximum) {
1833 dev_info(adev->dev, "acoustic target threshold setting(%ld) must be within [%d, %d]!\n",
1834 input[0], minimum, maximum);
1835 return -EINVAL;
1836 }
1837
1838 od_table->OverDriveTable.AcousticTargetRpmThreshold = input[0];
1839 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
1840 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
1841 break;
1842
1843 case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
1844 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
1845 dev_warn(adev->dev, "Fan curve setting not supported!\n");
1846 return -ENOTSUPP;
1847 }
1848
1849 smu_v13_0_7_get_od_setting_limits(smu,
1850 PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
1851 &minimum,
1852 &maximum);
1853 if (input[0] < minimum ||
1854 input[0] > maximum) {
1855 dev_info(adev->dev, "fan target temperature setting(%ld) must be within [%d, %d]!\n",
1856 input[0], minimum, maximum);
1857 return -EINVAL;
1858 }
1859
1860 od_table->OverDriveTable.FanTargetTemperature = input[0];
1861 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
1862 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
1863 break;
1864
1865 case PP_OD_EDIT_FAN_MINIMUM_PWM:
1866 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
1867 dev_warn(adev->dev, "Fan curve setting not supported!\n");
1868 return -ENOTSUPP;
1869 }
1870
1871 smu_v13_0_7_get_od_setting_limits(smu,
1872 PP_OD_FEATURE_FAN_MINIMUM_PWM,
1873 &minimum,
1874 &maximum);
1875 if (input[0] < minimum ||
1876 input[0] > maximum) {
1877 dev_info(adev->dev, "fan minimum pwm setting(%ld) must be within [%d, %d]!\n",
1878 input[0], minimum, maximum);
1879 return -EINVAL;
1880 }
1881
1882 od_table->OverDriveTable.FanMinimumPwm = input[0];
1883 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
1884 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
1885 break;
1886
1887 case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
1888 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) {
1889 dev_warn(adev->dev, "Zero RPM setting not supported!\n");
1890 return -ENOTSUPP;
1891 }
1892
1893 smu_v13_0_7_get_od_setting_limits(smu,
1894 PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE,
1895 &minimum,
1896 &maximum);
1897 if (input[0] < minimum ||
1898 input[0] > maximum) {
1899 dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n",
1900 input[0], minimum, maximum);
1901 return -EINVAL;
1902 }
1903
1904 od_table->OverDriveTable.FanZeroRpmEnable = input[0];
1905 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
1906 break;
1907
1908 case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP:
1909 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) {
1910 dev_warn(adev->dev, "Zero RPM setting not supported!\n");
1911 return -ENOTSUPP;
1912 }
1913
1914 smu_v13_0_7_get_od_setting_limits(smu,
1915 PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP,
1916 &minimum,
1917 &maximum);
1918 if (input[0] < minimum ||
1919 input[0] > maximum) {
1920 dev_info(adev->dev, "zero RPM stop temperature setting(%ld) must be within [%d, %d]!\n",
1921 input[0], minimum, maximum);
1922 return -EINVAL;
1923 }
1924
1925 od_table->OverDriveTable.FanZeroRpmStopTemp = input[0];
1926 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
1927 break;
1928
1929 case PP_OD_RESTORE_DEFAULT_TABLE:
1930 if (size == 1) {
1931 ret = smu_v13_0_7_od_restore_table_single(smu, input[0]);
1932 if (ret)
1933 return ret;
1934 } else {
1935 feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask;
1936 memcpy(od_table,
1937 table_context->boot_overdrive_table,
1938 sizeof(OverDriveTableExternal_t));
1939 od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask;
1940 }
1941 fallthrough;
1942
1943 case PP_OD_COMMIT_DPM_TABLE:
1944 	/*
1945 	 * The member below tells the PMFW which settings are targeted
1946 	 * by this single operation.
1947 	 * `uint32_t FeatureCtrlMask;`
1948 	 * It carries no actual information about the user's custom
1949 	 * settings, so we do not cache it.
1950 	 */
1951 offset_of_voltageoffset = offsetof(OverDriveTable_t, VoltageOffsetPerZoneBoundary);
1952 if (memcmp((u8 *)od_table + offset_of_voltageoffset,
1953 table_context->user_overdrive_table + offset_of_voltageoffset,
1954 sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset)) {
1955 smu_v13_0_7_dump_od_table(smu, od_table);
1956
1957 ret = smu_v13_0_7_upload_overdrive_table(smu, od_table);
1958 if (ret) {
1959 dev_err(adev->dev, "Failed to upload overdrive table!\n");
1960 return ret;
1961 }
1962
1963 od_table->OverDriveTable.FeatureCtrlMask = 0;
1964 memcpy(table_context->user_overdrive_table + offset_of_voltageoffset,
1965 (u8 *)od_table + offset_of_voltageoffset,
1966 sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset);
1967
1968 if (!memcmp(table_context->user_overdrive_table,
1969 table_context->boot_overdrive_table,
1970 sizeof(OverDriveTableExternal_t)))
1971 smu->user_dpm_profile.user_od = false;
1972 else
1973 smu->user_dpm_profile.user_od = true;
1974 }
1975 break;
1976
1977 default:
1978 return -ENOSYS;
1979 }
1980
1981 return ret;
1982 }
1983
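/*
 * Force the soft min/max level for a clock domain from a level bitmask:
 * the lowest set bit becomes the soft minimum and the highest set bit
 * the soft maximum. Fine-grained DPM tables only expose two levels, so
 * the requested levels are clamped to 0/1 there.
 */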
1984 static int smu_v13_0_7_force_clk_levels(struct smu_context *smu,
1985 enum smu_clk_type clk_type,
1986 uint32_t mask)
1987 {
1988 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
1989 struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
1990 struct smu_13_0_dpm_table *single_dpm_table;
1991 uint32_t soft_min_level, soft_max_level;
1992 uint32_t min_freq, max_freq;
1993 int ret = 0;
1994
1995 soft_min_level = mask ? (ffs(mask) - 1) : 0;
1996 soft_max_level = mask ? (fls(mask) - 1) : 0;
1997
1998 switch (clk_type) {
1999 case SMU_GFXCLK:
2000 case SMU_SCLK:
2001 single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
2002 break;
2003 case SMU_MCLK:
2004 case SMU_UCLK:
2005 single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
2006 break;
2007 case SMU_SOCCLK:
2008 single_dpm_table = &(dpm_context->dpm_tables.soc_table);
2009 break;
2010 case SMU_FCLK:
2011 single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
2012 break;
2013 case SMU_VCLK:
2014 case SMU_VCLK1:
2015 single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
2016 break;
2017 case SMU_DCLK:
2018 case SMU_DCLK1:
2019 single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
2020 break;
2021 default:
2022 break;
2023 }
2024
2025 switch (clk_type) {
2026 case SMU_GFXCLK:
2027 case SMU_SCLK:
2028 case SMU_MCLK:
2029 case SMU_UCLK:
2030 case SMU_SOCCLK:
2031 case SMU_FCLK:
2032 case SMU_VCLK:
2033 case SMU_VCLK1:
2034 case SMU_DCLK:
2035 case SMU_DCLK1:
2036 if (single_dpm_table->is_fine_grained) {
2037 			/* There are only two levels for fine-grained DPM */
2038 soft_max_level = (soft_max_level >= 1 ? 1 : 0);
2039 soft_min_level = (soft_min_level >= 1 ? 1 : 0);
2040 } else {
2041 if ((soft_max_level >= single_dpm_table->count) ||
2042 (soft_min_level >= single_dpm_table->count))
2043 return -EINVAL;
2044 }
2045
2046 min_freq = single_dpm_table->dpm_levels[soft_min_level].value;
2047 max_freq = single_dpm_table->dpm_levels[soft_max_level].value;
2048
2049 ret = smu_v13_0_set_soft_freq_limited_range(smu,
2050 clk_type,
2051 min_freq,
2052 max_freq,
2053 false);
2054 break;
2055 case SMU_DCEFCLK:
2056 case SMU_PCIE:
2057 default:
2058 break;
2059 }
2060
2061 return ret;
2062 }
2063
2064 static const struct smu_temperature_range smu13_thermal_policy[] = {
2065 {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
2066 { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
2067 };
2068
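/*
 * Build the thermal trip points from the SKU table temperature limits
 * (edge/hotspot/mem) plus their CTF offsets, and take the software
 * shutdown temperature from the powerplay table.
 */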
2069 static int smu_v13_0_7_get_thermal_temperature_range(struct smu_context *smu,
2070 struct smu_temperature_range *range)
2071 {
2072 struct smu_table_context *table_context = &smu->smu_table;
2073 struct smu_13_0_7_powerplay_table *powerplay_table =
2074 table_context->power_play_table;
2075 PPTable_t *pptable = smu->smu_table.driver_pptable;
2076
2077 if (!range)
2078 return -EINVAL;
2079
2080 memcpy(range, &smu13_thermal_policy[0], sizeof(struct smu_temperature_range));
2081
2082 range->max = pptable->SkuTable.TemperatureLimit[TEMP_EDGE] *
2083 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2084 range->edge_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE) *
2085 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2086 range->hotspot_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] *
2087 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2088 range->hotspot_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT) *
2089 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2090 range->mem_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_MEM] *
2091 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2092 range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)*
2093 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2094 range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
2095 range->software_shutdown_temp_offset = pptable->SkuTable.FanAbnormalTempLimitOffset;
2096
2097 return 0;
2098 }
2099
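/*
 * Translate the PMFW metrics table into a gpu_metrics_v1_3 snapshot.
 * Post-deep-sleep clock averages are reported while activity is at or
 * below SMU_13_0_7_BUSY_THRESHOLD, pre-deep-sleep averages otherwise.
 */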
2100 static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu,
2101 void **table)
2102 {
2103 struct smu_table_context *smu_table = &smu->smu_table;
2104 struct gpu_metrics_v1_3 *gpu_metrics =
2105 (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
2106 SmuMetricsExternal_t metrics_ext;
2107 SmuMetrics_t *metrics = &metrics_ext.SmuMetrics;
2108 int ret = 0;
2109
2110 ret = smu_cmn_get_metrics_table(smu,
2111 &metrics_ext,
2112 true);
2113 if (ret)
2114 return ret;
2115
2116 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
2117
2118 gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE];
2119 gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT];
2120 gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM];
2121 gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX];
2122 gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC];
2123 gpu_metrics->temperature_vrmem = max(metrics->AvgTemperature[TEMP_VR_MEM0],
2124 metrics->AvgTemperature[TEMP_VR_MEM1]);
2125
2126 gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
2127 gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
2128 gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage,
2129 metrics->Vcn1ActivityPercentage);
2130
2131 gpu_metrics->average_socket_power = metrics->AverageSocketPower;
2132 gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;
2133
2134 if (metrics->AverageGfxActivity <= SMU_13_0_7_BUSY_THRESHOLD)
2135 gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
2136 else
2137 gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;
2138
2139 if (metrics->AverageUclkActivity <= SMU_13_0_7_BUSY_THRESHOLD)
2140 gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs;
2141 else
2142 gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs;
2143
2144 gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency;
2145 gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency;
2146 gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
2147 gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
2148
2149 gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK];
2150 gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
2151 gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
2152 gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
2153 gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
2154 gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1];
2155 gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_1];
2156
2157 gpu_metrics->throttle_status =
2158 smu_v13_0_7_get_throttler_status(metrics);
2159 gpu_metrics->indep_throttle_status =
2160 smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
2161 smu_v13_0_7_throttler_map);
2162
2163 gpu_metrics->current_fan_speed = metrics->AvgFanRpm;
2164
2165 gpu_metrics->pcie_link_width = metrics->PcieWidth;
2166 if ((metrics->PcieRate - 1) > LINK_SPEED_MAX)
2167 gpu_metrics->pcie_link_speed = pcie_gen_to_speed(1);
2168 else
2169 gpu_metrics->pcie_link_speed = pcie_gen_to_speed(metrics->PcieRate);
2170
2171 gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
2172
2173 gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_GFX];
2174 gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_SOC];
2175 gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VMEMP];
2176
2177 *table = (void *)gpu_metrics;
2178
2179 return sizeof(struct gpu_metrics_v1_3);
2180 }
2181
2182 static void smu_v13_0_7_set_supported_od_feature_mask(struct smu_context *smu)
2183 {
2184 struct amdgpu_device *adev = smu->adev;
2185
2186 if (smu_v13_0_7_is_od_feature_supported(smu,
2187 PP_OD_FEATURE_FAN_CURVE_BIT))
2188 adev->pm.od_feature_mask |= OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE |
2189 OD_OPS_SUPPORT_FAN_CURVE_SET |
2190 OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE |
2191 OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET |
2192 OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE |
2193 OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET |
2194 OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE |
2195 OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET |
2196 OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE |
2197 OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET |
2198 OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE |
2199 OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET |
2200 OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE |
2201 OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET;
2202 }
2203
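/*
 * Fetch the boot-time overdrive defaults from the PMFW and seed the
 * working overdrive table with them. On S3/S4/runtime-pm resume the
 * cached user-defined values are preserved in user_od_table rather
 * than overwritten with the defaults.
 */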
2204 static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu)
2205 {
2206 OverDriveTableExternal_t *od_table =
2207 (OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
2208 OverDriveTableExternal_t *boot_od_table =
2209 (OverDriveTableExternal_t *)smu->smu_table.boot_overdrive_table;
2210 OverDriveTableExternal_t *user_od_table =
2211 (OverDriveTableExternal_t *)smu->smu_table.user_overdrive_table;
2212 OverDriveTableExternal_t user_od_table_bak;
2213 int ret = 0;
2214 int i;
2215
2216 ret = smu_v13_0_7_get_overdrive_table(smu, boot_od_table);
2217 if (ret)
2218 return ret;
2219
2220 smu_v13_0_7_dump_od_table(smu, boot_od_table);
2221
2222 memcpy(od_table,
2223 boot_od_table,
2224 sizeof(OverDriveTableExternal_t));
2225
2226 	/*
2227 	 * For S3/S4/runtime-pm resume, we need to set up those overdrive tables again,
2228 	 * but we have to preserve the user-defined values in "user_od_table".
2229 	 */
2230 if (!smu->adev->in_suspend) {
2231 memcpy(user_od_table,
2232 boot_od_table,
2233 sizeof(OverDriveTableExternal_t));
2234 smu->user_dpm_profile.user_od = false;
2235 } else if (smu->user_dpm_profile.user_od) {
2236 memcpy(&user_od_table_bak,
2237 user_od_table,
2238 sizeof(OverDriveTableExternal_t));
2239 memcpy(user_od_table,
2240 boot_od_table,
2241 sizeof(OverDriveTableExternal_t));
2242 user_od_table->OverDriveTable.GfxclkFmin =
2243 user_od_table_bak.OverDriveTable.GfxclkFmin;
2244 user_od_table->OverDriveTable.GfxclkFmax =
2245 user_od_table_bak.OverDriveTable.GfxclkFmax;
2246 user_od_table->OverDriveTable.UclkFmin =
2247 user_od_table_bak.OverDriveTable.UclkFmin;
2248 user_od_table->OverDriveTable.UclkFmax =
2249 user_od_table_bak.OverDriveTable.UclkFmax;
2250 for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
2251 user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] =
2252 user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i];
2253 for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++) {
2254 user_od_table->OverDriveTable.FanLinearTempPoints[i] =
2255 user_od_table_bak.OverDriveTable.FanLinearTempPoints[i];
2256 user_od_table->OverDriveTable.FanLinearPwmPoints[i] =
2257 user_od_table_bak.OverDriveTable.FanLinearPwmPoints[i];
2258 }
2259 user_od_table->OverDriveTable.AcousticLimitRpmThreshold =
2260 user_od_table_bak.OverDriveTable.AcousticLimitRpmThreshold;
2261 user_od_table->OverDriveTable.AcousticTargetRpmThreshold =
2262 user_od_table_bak.OverDriveTable.AcousticTargetRpmThreshold;
2263 user_od_table->OverDriveTable.FanTargetTemperature =
2264 user_od_table_bak.OverDriveTable.FanTargetTemperature;
2265 user_od_table->OverDriveTable.FanMinimumPwm =
2266 user_od_table_bak.OverDriveTable.FanMinimumPwm;
2267 user_od_table->OverDriveTable.FanZeroRpmEnable =
2268 user_od_table_bak.OverDriveTable.FanZeroRpmEnable;
2269 user_od_table->OverDriveTable.FanZeroRpmStopTemp =
2270 user_od_table_bak.OverDriveTable.FanZeroRpmStopTemp;
2271 }
2272
2273 smu_v13_0_7_set_supported_od_feature_mask(smu);
2274
2275 return 0;
2276 }
2277
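/*
 * Re-upload the cached user overdrive settings (clocks, voltage curve
 * and fan controls), typically after a reset or resume, then mirror
 * them into the working overdrive table on success.
 */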
2278 static int smu_v13_0_7_restore_user_od_settings(struct smu_context *smu)
2279 {
2280 struct smu_table_context *table_context = &smu->smu_table;
2281 OverDriveTableExternal_t *od_table = table_context->overdrive_table;
2282 OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table;
2283 int res;
2284
2285 user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
2286 BIT(PP_OD_FEATURE_UCLK_BIT) |
2287 BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
2288 BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2289 res = smu_v13_0_7_upload_overdrive_table(smu, user_od_table);
2290 user_od_table->OverDriveTable.FeatureCtrlMask = 0;
2291 if (res == 0)
2292 memcpy(od_table, user_od_table, sizeof(OverDriveTableExternal_t));
2293
2294 return res;
2295 }
2296
2297 static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
2298 {
2299 struct smu_13_0_dpm_context *dpm_context =
2300 smu->smu_dpm.dpm_context;
2301 struct smu_13_0_dpm_table *gfx_table =
2302 &dpm_context->dpm_tables.gfx_table;
2303 struct smu_13_0_dpm_table *mem_table =
2304 &dpm_context->dpm_tables.uclk_table;
2305 struct smu_13_0_dpm_table *soc_table =
2306 &dpm_context->dpm_tables.soc_table;
2307 struct smu_13_0_dpm_table *vclk_table =
2308 &dpm_context->dpm_tables.vclk_table;
2309 struct smu_13_0_dpm_table *dclk_table =
2310 &dpm_context->dpm_tables.dclk_table;
2311 struct smu_13_0_dpm_table *fclk_table =
2312 &dpm_context->dpm_tables.fclk_table;
2313 struct smu_umd_pstate_table *pstate_table =
2314 &smu->pstate_table;
2315 struct smu_table_context *table_context = &smu->smu_table;
2316 PPTable_t *pptable = table_context->driver_pptable;
2317 DriverReportedClocks_t driver_clocks =
2318 pptable->SkuTable.DriverReportedClocks;
2319
2320 pstate_table->gfxclk_pstate.min = gfx_table->min;
2321 if (driver_clocks.GameClockAc &&
2322 (driver_clocks.GameClockAc < gfx_table->max))
2323 pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
2324 else
2325 pstate_table->gfxclk_pstate.peak = gfx_table->max;
2326
2327 pstate_table->uclk_pstate.min = mem_table->min;
2328 pstate_table->uclk_pstate.peak = mem_table->max;
2329
2330 pstate_table->socclk_pstate.min = soc_table->min;
2331 pstate_table->socclk_pstate.peak = soc_table->max;
2332
2333 pstate_table->vclk_pstate.min = vclk_table->min;
2334 pstate_table->vclk_pstate.peak = vclk_table->max;
2335
2336 pstate_table->dclk_pstate.min = dclk_table->min;
2337 pstate_table->dclk_pstate.peak = dclk_table->max;
2338
2339 pstate_table->fclk_pstate.min = fclk_table->min;
2340 pstate_table->fclk_pstate.peak = fclk_table->max;
2341
2342 if (driver_clocks.BaseClockAc &&
2343 driver_clocks.BaseClockAc < gfx_table->max)
2344 pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
2345 else
2346 pstate_table->gfxclk_pstate.standard = gfx_table->max;
2347 pstate_table->uclk_pstate.standard = mem_table->max;
2348 pstate_table->socclk_pstate.standard = soc_table->min;
2349 pstate_table->vclk_pstate.standard = vclk_table->min;
2350 pstate_table->dclk_pstate.standard = dclk_table->min;
2351 pstate_table->fclk_pstate.standard = fclk_table->min;
2352
2353 return 0;
2354 }
2355
2356 static int smu_v13_0_7_get_fan_speed_pwm(struct smu_context *smu,
2357 uint32_t *speed)
2358 {
2359 int ret;
2360
2361 if (!speed)
2362 return -EINVAL;
2363
2364 ret = smu_v13_0_7_get_smu_metrics_data(smu,
2365 METRICS_CURR_FANPWM,
2366 speed);
2367 if (ret) {
2368 dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!");
2369 return ret;
2370 }
2371
2372 	/* Convert the PMFW output, which is a percentage, to a 0-255 PWM value */
2373 *speed = min(*speed * 255 / 100, (uint32_t)255);
2374
2375 return 0;
2376 }
2377
2378 static int smu_v13_0_7_get_fan_speed_rpm(struct smu_context *smu,
2379 uint32_t *speed)
2380 {
2381 if (!speed)
2382 return -EINVAL;
2383
2384 return smu_v13_0_7_get_smu_metrics_data(smu,
2385 METRICS_CURR_FANSPEED,
2386 speed);
2387 }
2388
2389 static int smu_v13_0_7_enable_mgpu_fan_boost(struct smu_context *smu)
2390 {
2391 struct smu_table_context *table_context = &smu->smu_table;
2392 PPTable_t *pptable = table_context->driver_pptable;
2393 SkuTable_t *skutable = &pptable->SkuTable;
2394
2395 /*
2396 * Skip the MGpuFanBoost setting for those ASICs
2397 * which do not support it
2398 */
2399 if (skutable->MGpuAcousticLimitRpmThreshold == 0)
2400 return 0;
2401
2402 return smu_cmn_send_smc_msg_with_param(smu,
2403 SMU_MSG_SetMGpuFanBoostLimitRpm,
2404 0,
2405 NULL);
2406 }
2407
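/*
 * Report the current/default power limit from the PMFW (falling back
 * to the AC/DC socket limits in the SKU table) and derive the min/max
 * bounds from the overdrive power-percentage settings when PPT
 * overdrive is supported.
 */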
2408 static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
2409 uint32_t *current_power_limit,
2410 uint32_t *default_power_limit,
2411 uint32_t *max_power_limit,
2412 uint32_t *min_power_limit)
2413 {
2414 struct smu_table_context *table_context = &smu->smu_table;
2415 struct smu_13_0_7_powerplay_table *powerplay_table =
2416 (struct smu_13_0_7_powerplay_table *)table_context->power_play_table;
2417 PPTable_t *pptable = table_context->driver_pptable;
2418 SkuTable_t *skutable = &pptable->SkuTable;
2419 uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
2420 uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
2421
2422 if (smu_v13_0_get_current_power_limit(smu, &power_limit))
2423 power_limit = smu->adev->pm.ac_power ?
2424 skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0] :
2425 skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0];
2426
2427 if (current_power_limit)
2428 *current_power_limit = power_limit;
2429 if (default_power_limit)
2430 *default_power_limit = power_limit;
2431
2432 if (powerplay_table) {
2433 if (smu->od_enabled &&
2434 (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT))) {
2435 od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
2436 od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
2437 } else if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
2438 od_percent_upper = 0;
2439 od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
2440 }
2441 }
2442
2443 dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
2444 od_percent_upper, od_percent_lower, power_limit);
2445
2446 if (max_power_limit) {
2447 *max_power_limit = msg_limit * (100 + od_percent_upper);
2448 *max_power_limit /= 100;
2449 }
2450
2451 if (min_power_limit) {
2452 *min_power_limit = power_limit * (100 - od_percent_lower);
2453 *min_power_limit /= 100;
2454 }
2455
2456 return 0;
2457 }
2458
2459 static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf)
2460 {
2461 DpmActivityMonitorCoeffIntExternal_t *activity_monitor_external;
2462 uint32_t i, j, size = 0;
2463 int16_t workload_type = 0;
2464 int result = 0;
2465
2466 if (!buf)
2467 return -EINVAL;
2468
2469 activity_monitor_external = kcalloc(PP_SMC_POWER_PROFILE_COUNT,
2470 sizeof(*activity_monitor_external),
2471 GFP_KERNEL);
2472 if (!activity_monitor_external)
2473 return -ENOMEM;
2474
2475 size += sysfs_emit_at(buf, size, " ");
2476 for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++)
2477 size += sysfs_emit_at(buf, size, "%d %-14s%s", i, amdgpu_pp_profile_name[i],
2478 (i == smu->power_profile_mode) ? "* " : " ");
2479
2480 size += sysfs_emit_at(buf, size, "\n");
2481
2482 for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++) {
2483 		/* convert PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
2484 workload_type = smu_cmn_to_asic_specific_index(smu,
2485 CMN2ASIC_MAPPING_WORKLOAD,
2486 i);
2487 if (workload_type == -ENOTSUPP)
2488 continue;
2489 else if (workload_type < 0) {
2490 result = -EINVAL;
2491 goto out;
2492 }
2493
2494 result = smu_cmn_update_table(smu,
2495 SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
2496 (void *)(&activity_monitor_external[i]), false);
2497 if (result) {
2498 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
2499 goto out;
2500 }
2501 }
2502
2503 #define PRINT_DPM_MONITOR(field) \
2504 do { \
2505 size += sysfs_emit_at(buf, size, "%-30s", #field); \
2506 for (j = 0; j <= PP_SMC_POWER_PROFILE_WINDOW3D; j++) \
2507 size += sysfs_emit_at(buf, size, "%-18d", activity_monitor_external[j].DpmActivityMonitorCoeffInt.field); \
2508 size += sysfs_emit_at(buf, size, "\n"); \
2509 } while (0)
2510
2511 PRINT_DPM_MONITOR(Gfx_ActiveHystLimit);
2512 PRINT_DPM_MONITOR(Gfx_IdleHystLimit);
2513 PRINT_DPM_MONITOR(Gfx_FPS);
2514 PRINT_DPM_MONITOR(Gfx_MinActiveFreqType);
2515 PRINT_DPM_MONITOR(Gfx_BoosterFreqType);
2516 PRINT_DPM_MONITOR(Gfx_MinActiveFreq);
2517 PRINT_DPM_MONITOR(Gfx_BoosterFreq);
2518 PRINT_DPM_MONITOR(Fclk_ActiveHystLimit);
2519 PRINT_DPM_MONITOR(Fclk_IdleHystLimit);
2520 PRINT_DPM_MONITOR(Fclk_FPS);
2521 PRINT_DPM_MONITOR(Fclk_MinActiveFreqType);
2522 PRINT_DPM_MONITOR(Fclk_BoosterFreqType);
2523 PRINT_DPM_MONITOR(Fclk_MinActiveFreq);
2524 PRINT_DPM_MONITOR(Fclk_BoosterFreq);
2525 #undef PRINT_DPM_MONITOR
2526
2527 result = size;
2528 out:
2529 kfree(activity_monitor_external);
2530 return result;
2531 }
2532
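/*
 * Custom power-profile parameters are stored as two groups (Gfxclk and
 * Fclk) of SMU_13_0_7_CUSTOM_PARAMS_COUNT longs each: an enable flag
 * followed by the seven activity-monitor coefficients for that clock.
 */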
2533 #define SMU_13_0_7_CUSTOM_PARAMS_COUNT 8
2534 #define SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT 2
2535 #define SMU_13_0_7_CUSTOM_PARAMS_SIZE (SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT * SMU_13_0_7_CUSTOM_PARAMS_COUNT * sizeof(long))
2536
2537 static int smu_v13_0_7_set_power_profile_mode_coeff(struct smu_context *smu,
2538 long *input)
2539 {
2540
2541 DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
2542 DpmActivityMonitorCoeffInt_t *activity_monitor =
2543 &(activity_monitor_external.DpmActivityMonitorCoeffInt);
2544 int ret, idx;
2545
2546 ret = smu_cmn_update_table(smu,
2547 SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
2548 (void *)(&activity_monitor_external), false);
2549 if (ret) {
2550 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
2551 return ret;
2552 }
2553
2554 idx = 0 * SMU_13_0_7_CUSTOM_PARAMS_COUNT;
2555 if (input[idx]) {
2556 /* Gfxclk */
2557 activity_monitor->Gfx_ActiveHystLimit = input[idx + 1];
2558 activity_monitor->Gfx_IdleHystLimit = input[idx + 2];
2559 activity_monitor->Gfx_FPS = input[idx + 3];
2560 activity_monitor->Gfx_MinActiveFreqType = input[idx + 4];
2561 activity_monitor->Gfx_BoosterFreqType = input[idx + 5];
2562 activity_monitor->Gfx_MinActiveFreq = input[idx + 6];
2563 activity_monitor->Gfx_BoosterFreq = input[idx + 7];
2564 }
2565 idx = 1 * SMU_13_0_7_CUSTOM_PARAMS_COUNT;
2566 if (input[idx]) {
2567 /* Fclk */
2568 activity_monitor->Fclk_ActiveHystLimit = input[idx + 1];
2569 activity_monitor->Fclk_IdleHystLimit = input[idx + 2];
2570 activity_monitor->Fclk_FPS = input[idx + 3];
2571 activity_monitor->Fclk_MinActiveFreqType = input[idx + 4];
2572 activity_monitor->Fclk_BoosterFreqType = input[idx + 5];
2573 activity_monitor->Fclk_MinActiveFreq = input[idx + 6];
2574 activity_monitor->Fclk_BoosterFreq = input[idx + 7];
2575 }
2576
2577 ret = smu_cmn_update_table(smu,
2578 SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
2579 (void *)(&activity_monitor_external), true);
2580 if (ret) {
2581 dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
2582 return ret;
2583 }
2584
2585 return ret;
2586 }
2587
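/*
 * Apply a workload mask to the PMFW. For the CUSTOM profile the
 * supplied coefficients are cached in smu->custom_profile_params and
 * written to the activity-monitor table first; the cached enable flag
 * is rolled back if either step fails.
 */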
2588 static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu,
2589 u32 workload_mask,
2590 long *custom_params,
2591 u32 custom_params_max_idx)
2592 {
2593 u32 backend_workload_mask = 0;
2594 int ret, idx = -1, i;
2595
2596 smu_cmn_get_backend_workload_mask(smu, workload_mask,
2597 &backend_workload_mask);
2598
2599 if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
2600 if (!smu->custom_profile_params) {
2601 smu->custom_profile_params =
2602 kzalloc(SMU_13_0_7_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
2603 if (!smu->custom_profile_params)
2604 return -ENOMEM;
2605 }
2606 if (custom_params && custom_params_max_idx) {
2607 if (custom_params_max_idx != SMU_13_0_7_CUSTOM_PARAMS_COUNT)
2608 return -EINVAL;
2609 if (custom_params[0] >= SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT)
2610 return -EINVAL;
2611 idx = custom_params[0] * SMU_13_0_7_CUSTOM_PARAMS_COUNT;
2612 smu->custom_profile_params[idx] = 1;
2613 for (i = 1; i < custom_params_max_idx; i++)
2614 smu->custom_profile_params[idx + i] = custom_params[i];
2615 }
2616 ret = smu_v13_0_7_set_power_profile_mode_coeff(smu,
2617 smu->custom_profile_params);
2618 if (ret) {
2619 if (idx != -1)
2620 smu->custom_profile_params[idx] = 0;
2621 return ret;
2622 }
2623 } else if (smu->custom_profile_params) {
2624 memset(smu->custom_profile_params, 0, SMU_13_0_7_CUSTOM_PARAMS_SIZE);
2625 }
2626
2627 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
2628 backend_workload_mask, NULL);
2629
2630 if (ret) {
2631 dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
2632 workload_mask);
2633 if (idx != -1)
2634 smu->custom_profile_params[idx] = 0;
2635 return ret;
2636 }
2637
2638 return ret;
2639 }
2640
2641 static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
2642 enum pp_mp1_state mp1_state)
2643 {
2644 int ret;
2645
2646 switch (mp1_state) {
2647 case PP_MP1_STATE_UNLOAD:
2648 ret = smu_cmn_set_mp1_state(smu, mp1_state);
2649 break;
2650 default:
2651 /* Ignore others */
2652 ret = 0;
2653 }
2654
2655 return ret;
2656 }
2657
2658 static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
2659 {
2660 struct amdgpu_device *adev = smu->adev;
2661
2662 /* SRIOV does not support SMU mode1 reset */
2663 if (amdgpu_sriov_vf(adev))
2664 return false;
2665
2666 return true;
2667 }
2668
2669 static int smu_v13_0_7_set_df_cstate(struct smu_context *smu,
2670 enum pp_df_cstate state)
2671 {
2672 return smu_cmn_send_smc_msg_with_param(smu,
2673 SMU_MSG_DFCstateControl,
2674 state,
2675 NULL);
2676 }
2677
2678 static bool smu_v13_0_7_wbrf_support_check(struct smu_context *smu)
2679 {
2680 return smu->smc_fw_version > 0x00524600;
2681 }
2682
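/*
 * Program a new PPT limit. Limits at or below the PMFW message limit go
 * through the regular SetPptLimit path (clearing any previously applied
 * OD boost first); higher limits are only honored with overdrive
 * enabled and are expressed as an OD percentage above the message limit.
 */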
2683 static int smu_v13_0_7_set_power_limit(struct smu_context *smu,
2684 enum smu_ppt_limit_type limit_type,
2685 uint32_t limit)
2686 {
2687 PPTable_t *pptable = smu->smu_table.driver_pptable;
2688 SkuTable_t *skutable = &pptable->SkuTable;
2689 uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
2690 struct smu_table_context *table_context = &smu->smu_table;
2691 OverDriveTableExternal_t *od_table =
2692 (OverDriveTableExternal_t *)table_context->overdrive_table;
2693 int ret = 0;
2694
2695 if (limit_type != SMU_DEFAULT_PPT_LIMIT)
2696 return -EINVAL;
2697
2698 if (limit <= msg_limit) {
2699 if (smu->current_power_limit > msg_limit) {
2700 od_table->OverDriveTable.Ppt = 0;
2701 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
2702
2703 ret = smu_v13_0_7_upload_overdrive_table(smu, od_table);
2704 if (ret) {
2705 dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
2706 return ret;
2707 }
2708 }
2709 return smu_v13_0_set_power_limit(smu, limit_type, limit);
2710 } else if (smu->od_enabled) {
2711 ret = smu_v13_0_set_power_limit(smu, limit_type, msg_limit);
2712 if (ret)
2713 return ret;
2714
2715 od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
2716 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
2717
2718 ret = smu_v13_0_7_upload_overdrive_table(smu, od_table);
2719 if (ret) {
2720 dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
2721 return ret;
2722 }
2723
2724 smu->current_power_limit = limit;
2725 } else {
2726 return -EINVAL;
2727 }
2728
2729 return 0;
2730 }
2731
2732 static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
2733 .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
2734 .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
2735 .is_dpm_running = smu_v13_0_7_is_dpm_running,
2736 .init_microcode = smu_v13_0_init_microcode,
2737 .load_microcode = smu_v13_0_load_microcode,
2738 .fini_microcode = smu_v13_0_fini_microcode,
2739 .init_smc_tables = smu_v13_0_7_init_smc_tables,
2740 .fini_smc_tables = smu_v13_0_fini_smc_tables,
2741 .init_power = smu_v13_0_init_power,
2742 .fini_power = smu_v13_0_fini_power,
2743 .check_fw_status = smu_v13_0_7_check_fw_status,
2744 .setup_pptable = smu_v13_0_7_setup_pptable,
2745 .check_fw_version = smu_v13_0_check_fw_version,
2746 .write_pptable = smu_cmn_write_pptable,
2747 .set_driver_table_location = smu_v13_0_set_driver_table_location,
2748 .system_features_control = smu_v13_0_system_features_control,
2749 .set_allowed_mask = smu_v13_0_set_allowed_mask,
2750 .get_enabled_mask = smu_cmn_get_enabled_mask,
2751 .dpm_set_vcn_enable = smu_v13_0_set_vcn_enable,
2752 .dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
2753 .init_pptable_microcode = smu_v13_0_init_pptable_microcode,
2754 .populate_umd_state_clk = smu_v13_0_7_populate_umd_state_clk,
2755 .get_dpm_ultimate_freq = smu_v13_0_7_get_dpm_ultimate_freq,
2756 .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
2757 .read_sensor = smu_v13_0_7_read_sensor,
2758 .feature_is_enabled = smu_cmn_feature_is_enabled,
2759 .print_clk_levels = smu_v13_0_7_print_clk_levels,
2760 .force_clk_levels = smu_v13_0_7_force_clk_levels,
2761 .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
2762 .get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range,
2763 .register_irq_handler = smu_v13_0_register_irq_handler,
2764 .enable_thermal_alert = smu_v13_0_enable_thermal_alert,
2765 .disable_thermal_alert = smu_v13_0_disable_thermal_alert,
2766 .notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
2767 .get_gpu_metrics = smu_v13_0_7_get_gpu_metrics,
2768 .set_soft_freq_limited_range = smu_v13_0_set_soft_freq_limited_range,
2769 .set_default_od_settings = smu_v13_0_7_set_default_od_settings,
2770 .restore_user_od_settings = smu_v13_0_7_restore_user_od_settings,
2771 .od_edit_dpm_table = smu_v13_0_7_od_edit_dpm_table,
2772 .set_performance_level = smu_v13_0_set_performance_level,
2773 .gfx_off_control = smu_v13_0_gfx_off_control,
2774 .get_fan_speed_pwm = smu_v13_0_7_get_fan_speed_pwm,
2775 .get_fan_speed_rpm = smu_v13_0_7_get_fan_speed_rpm,
2776 .set_fan_speed_pwm = smu_v13_0_set_fan_speed_pwm,
2777 .set_fan_speed_rpm = smu_v13_0_set_fan_speed_rpm,
2778 .get_fan_control_mode = smu_v13_0_get_fan_control_mode,
2779 .set_fan_control_mode = smu_v13_0_set_fan_control_mode,
2780 .enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost,
2781 .get_power_limit = smu_v13_0_7_get_power_limit,
2782 .set_power_limit = smu_v13_0_7_set_power_limit,
2783 .set_power_source = smu_v13_0_set_power_source,
2784 .get_power_profile_mode = smu_v13_0_7_get_power_profile_mode,
2785 .set_power_profile_mode = smu_v13_0_7_set_power_profile_mode,
2786 .set_tool_table_location = smu_v13_0_set_tool_table_location,
2787 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
2788 .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
2789 .get_bamaco_support = smu_v13_0_get_bamaco_support,
2790 .baco_enter = smu_v13_0_baco_enter,
2791 .baco_exit = smu_v13_0_baco_exit,
2792 .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
2793 .mode1_reset = smu_v13_0_mode1_reset,
2794 .set_mp1_state = smu_v13_0_7_set_mp1_state,
2795 .set_df_cstate = smu_v13_0_7_set_df_cstate,
2796 .gpo_control = smu_v13_0_gpo_control,
2797 .is_asic_wbrf_supported = smu_v13_0_7_wbrf_support_check,
2798 .enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
2799 .set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
2800 .interrupt_work = smu_v13_0_interrupt_work,
2801 };
2802
2803 void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
2804 {
2805 smu->ppt_funcs = &smu_v13_0_7_ppt_funcs;
2806 smu->message_map = smu_v13_0_7_message_map;
2807 smu->clock_map = smu_v13_0_7_clk_map;
2808 smu->feature_map = smu_v13_0_7_feature_mask_map;
2809 smu->table_map = smu_v13_0_7_table_map;
2810 smu->pwr_src_map = smu_v13_0_7_pwr_src_map;
2811 smu->workload_map = smu_v13_0_7_workload_map;
2812 smu->smc_driver_if_version = SMU13_0_7_DRIVER_IF_VERSION;
2813 smu_v13_0_set_smu_mailbox_registers(smu);
2814 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
2815 }
2816