xref: /linux/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c (revision 22c55fb9eb92395d999b8404d73e58540d11bdd8)
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #define SWSMU_CODE_LAYER_L2
25 
26 #include <linux/firmware.h>
27 #include "amdgpu.h"
28 #include "amdgpu_dpm.h"
29 #include "amdgpu_smu.h"
30 #include "atomfirmware.h"
31 #include "amdgpu_atomfirmware.h"
32 #include "amdgpu_atombios.h"
33 #include "smu_v13_0.h"
34 #include "smu13_driver_if_aldebaran.h"
35 #include "soc15_common.h"
36 #include "atom.h"
37 #include "aldebaran_ppt.h"
38 #include "smu_v13_0_pptable.h"
39 #include "aldebaran_ppsmc.h"
40 #include "nbio/nbio_7_4_offset.h"
41 #include "nbio/nbio_7_4_sh_mask.h"
42 #include "thm/thm_11_0_2_offset.h"
43 #include "thm/thm_11_0_2_sh_mask.h"
44 #include "amdgpu_xgmi.h"
45 #include <linux/pci.h>
46 #include "amdgpu_ras.h"
47 #include "smu_cmn.h"
48 #include "mp/mp_13_0_2_offset.h"
49 
50 /*
51  * DO NOT use these for err/warn/info/debug messages.
52  * Use dev_err, dev_warn, dev_info and dev_dbg instead.
53  * They are more MGPU friendly.
54  */
55 #undef pr_err
56 #undef pr_warn
57 #undef pr_info
58 #undef pr_debug
59 
/* Build one entry of the generic SMU feature -> ASIC feature map:
 * {valid = 1, asic feature bit}.
 */
#define ALDEBARAN_FEA_MAP(smu_feature, aldebaran_feature) \
	[smu_feature] = {1, (aldebaran_feature)}

/* Single-bit mask within the 64-bit SMU feature word. */
#define FEATURE_MASK(feature) (1ULL << feature)
/* Union of all DPM-related SMC features handled by this backend. */
#define SMC_DPM_FEATURE ( \
			  FEATURE_MASK(FEATURE_DATA_CALCULATIONS) | \
			  FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)	| \
			  FEATURE_MASK(FEATURE_DPM_UCLK_BIT)	| \
			  FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)	| \
			  FEATURE_MASK(FEATURE_DPM_FCLK_BIT)	| \
			  FEATURE_MASK(FEATURE_DPM_LCLK_BIT)	| \
			  FEATURE_MASK(FEATURE_DPM_XGMI_BIT)	| \
			  FEATURE_MASK(FEATURE_DPM_VCN_BIT))

/* possible frequency drift (1Mhz) */
#define EPSILON				1

/* SMN address of the PCIe ESM control register
 * (NOTE(review): presumably Extended Speed Mode — confirm against HW spec).
 */
#define smnPCIE_ESM_CTRL			0x111003D0
78 
/*
 * SMU supports the ECCTABLE since version 68.42.0;
 * use this to check whether the ECCTABLE feature is supported.
 */
83 #define SUPPORT_ECCTABLE_SMU_VERSION 0x00442a00
84 
/*
 * SMU reports mca_ceumc_addr in the ECCTABLE since version 68.55.0;
 * use this to check whether the mca_ceumc_addr record is supported.
 */
89 #define SUPPORT_ECCTABLE_V2_SMU_VERSION 0x00443700
90 
/*
 * SMU supports the BAD CHANNEL info message since version 68.51.00;
 * use this to check whether the bad-channel-info message is supported.
 */
95 #define SUPPORT_BAD_CHANNEL_INFO_MSG_VERSION 0x00443300
96 
/* Thermal ranges (millidegrees C) handed to the thermal framework:
 * entry 0 is the default policy, entry 1 the absolute maximums.
 * NOTE(review): field order follows struct smu_temperature_range —
 * confirm per-sensor (edge/hotspot/mem) triplet meaning against the header.
 */
static const struct smu_temperature_range smu13_thermal_policy[] = {
	{-273150,  99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
	{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};
101 
/* Generic SMU message -> Aldebaran PPSMC message mapping.
 * The third MSG_MAP argument is the per-message flag used by smu_cmn
 * (NOTE(review): presumably "valid in VF" — confirm against smu_cmn.h).
 */
static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,			     PPSMC_MSG_TestMessage,			0),
	MSG_MAP(GetSmuVersion,			     PPSMC_MSG_GetSmuVersion,			1),
	MSG_MAP(GetDriverIfVersion,		     PPSMC_MSG_GetDriverIfVersion,		1),
	MSG_MAP(EnableAllSmuFeatures,		     PPSMC_MSG_EnableAllSmuFeatures,		0),
	MSG_MAP(DisableAllSmuFeatures,		     PPSMC_MSG_DisableAllSmuFeatures,		0),
	MSG_MAP(GetEnabledSmuFeaturesLow,	     PPSMC_MSG_GetEnabledSmuFeaturesLow,	1),
	MSG_MAP(GetEnabledSmuFeaturesHigh,	     PPSMC_MSG_GetEnabledSmuFeaturesHigh,	1),
	MSG_MAP(SetDriverDramAddrHigh,		     PPSMC_MSG_SetDriverDramAddrHigh,		1),
	MSG_MAP(SetDriverDramAddrLow,		     PPSMC_MSG_SetDriverDramAddrLow,		1),
	MSG_MAP(SetToolsDramAddrHigh,		     PPSMC_MSG_SetToolsDramAddrHigh,		0),
	MSG_MAP(SetToolsDramAddrLow,		     PPSMC_MSG_SetToolsDramAddrLow,		0),
	MSG_MAP(TransferTableSmu2Dram,		     PPSMC_MSG_TransferTableSmu2Dram,		1),
	MSG_MAP(TransferTableDram2Smu,		     PPSMC_MSG_TransferTableDram2Smu,		0),
	MSG_MAP(UseDefaultPPTable,		     PPSMC_MSG_UseDefaultPPTable,		0),
	MSG_MAP(SetSystemVirtualDramAddrHigh,	     PPSMC_MSG_SetSystemVirtualDramAddrHigh,	0),
	MSG_MAP(SetSystemVirtualDramAddrLow,	     PPSMC_MSG_SetSystemVirtualDramAddrLow,	0),
	MSG_MAP(SetSoftMinByFreq,		     PPSMC_MSG_SetSoftMinByFreq,		0),
	MSG_MAP(SetSoftMaxByFreq,		     PPSMC_MSG_SetSoftMaxByFreq,		0),
	MSG_MAP(SetHardMinByFreq,		     PPSMC_MSG_SetHardMinByFreq,		0),
	MSG_MAP(SetHardMaxByFreq,		     PPSMC_MSG_SetHardMaxByFreq,		0),
	MSG_MAP(GetMinDpmFreq,			     PPSMC_MSG_GetMinDpmFreq,			0),
	MSG_MAP(GetMaxDpmFreq,			     PPSMC_MSG_GetMaxDpmFreq,			0),
	MSG_MAP(GetDpmFreqByIndex,		     PPSMC_MSG_GetDpmFreqByIndex,		1),
	MSG_MAP(SetWorkloadMask,		     PPSMC_MSG_SetWorkloadMask,			1),
	MSG_MAP(GetVoltageByDpm,		     PPSMC_MSG_GetVoltageByDpm,			0),
	MSG_MAP(GetVoltageByDpmOverdrive,	     PPSMC_MSG_GetVoltageByDpmOverdrive,	0),
	MSG_MAP(SetPptLimit,			     PPSMC_MSG_SetPptLimit,			0),
	MSG_MAP(GetPptLimit,			     PPSMC_MSG_GetPptLimit,			1),
	MSG_MAP(PrepareMp1ForUnload,		     PPSMC_MSG_PrepareMp1ForUnload,		0),
	/* Note: generic GfxDeviceDriverReset maps to PPSMC GfxDriverReset. */
	MSG_MAP(GfxDeviceDriverReset,		     PPSMC_MSG_GfxDriverReset,			0),
	MSG_MAP(RunDcBtc,			     PPSMC_MSG_RunDcBtc,			0),
	MSG_MAP(DramLogSetDramAddrHigh,		     PPSMC_MSG_DramLogSetDramAddrHigh,		0),
	MSG_MAP(DramLogSetDramAddrLow,		     PPSMC_MSG_DramLogSetDramAddrLow,		0),
	MSG_MAP(DramLogSetDramSize,		     PPSMC_MSG_DramLogSetDramSize,		0),
	MSG_MAP(GetDebugData,			     PPSMC_MSG_GetDebugData,			0),
	MSG_MAP(WaflTest,			     PPSMC_MSG_WaflTest,			0),
	MSG_MAP(SetMemoryChannelEnable,		     PPSMC_MSG_SetMemoryChannelEnable,		0),
	MSG_MAP(SetNumBadHbmPagesRetired,	     PPSMC_MSG_SetNumBadHbmPagesRetired,	0),
	MSG_MAP(DFCstateControl,		     PPSMC_MSG_DFCstateControl,			0),
	MSG_MAP(GetGmiPwrDnHyst,		     PPSMC_MSG_GetGmiPwrDnHyst,			0),
	MSG_MAP(SetGmiPwrDnHyst,		     PPSMC_MSG_SetGmiPwrDnHyst,			0),
	MSG_MAP(GmiPwrDnControl,		     PPSMC_MSG_GmiPwrDnControl,			0),
	MSG_MAP(EnterGfxoff,			     PPSMC_MSG_EnterGfxoff,			0),
	MSG_MAP(ExitGfxoff,			     PPSMC_MSG_ExitGfxoff,			0),
	MSG_MAP(SetExecuteDMATest,		     PPSMC_MSG_SetExecuteDMATest,		0),
	MSG_MAP(EnableDeterminism,		     PPSMC_MSG_EnableDeterminism,		0),
	MSG_MAP(DisableDeterminism,		     PPSMC_MSG_DisableDeterminism,		0),
	MSG_MAP(SetUclkDpmMode,			     PPSMC_MSG_SetUclkDpmMode,			0),
	MSG_MAP(GfxDriverResetRecovery,		     PPSMC_MSG_GfxDriverResetRecovery,		0),
	MSG_MAP(BoardPowerCalibration,		     PPSMC_MSG_BoardPowerCalibration,		0),
	MSG_MAP(HeavySBR,                            PPSMC_MSG_HeavySBR,                        0),
	MSG_MAP(SetBadHBMPagesRetiredFlagsPerChannel,	PPSMC_MSG_SetBadHBMPagesRetiredFlagsPerChannel,	0),
};
156 
/* Generic SMU clock type -> Aldebaran PPCLK index.
 * SCLK aliases GFXCLK and MCLK aliases UCLK (same clock domains).
 */
static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(GFXCLK, PPCLK_GFXCLK),
	CLK_MAP(SCLK,	PPCLK_GFXCLK),
	CLK_MAP(SOCCLK, PPCLK_SOCCLK),
	CLK_MAP(FCLK,	PPCLK_FCLK),
	CLK_MAP(UCLK,	PPCLK_UCLK),
	CLK_MAP(MCLK,	PPCLK_UCLK),
	CLK_MAP(DCLK,	PPCLK_DCLK),
	CLK_MAP(VCLK,	PPCLK_VCLK),
	CLK_MAP(LCLK,	PPCLK_LCLK),
};
168 
/* Generic SMU feature bit -> Aldebaran firmware feature bit. */
static const struct cmn2asic_mapping aldebaran_feature_mask_map[SMU_FEATURE_COUNT] = {
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT,		FEATURE_DATA_CALCULATIONS),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT,			FEATURE_DPM_GFXCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_UCLK_BIT,			FEATURE_DPM_UCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_SOCCLK_BIT,			FEATURE_DPM_SOCCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT,			FEATURE_DPM_FCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_LCLK_BIT,			FEATURE_DPM_LCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_XGMI_BIT,			FEATURE_DPM_XGMI_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT,			FEATURE_DS_GFXCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT,			FEATURE_DS_SOCCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT,			FEATURE_DS_LCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT,			FEATURE_DS_FCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_UCLK_BIT,			FEATURE_DS_UCLK_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_GFX_SS_BIT,			FEATURE_GFX_SS_BIT),
	/* Generic VCN DPM maps onto the firmware's DPM_VCN feature. */
	ALDEBARAN_FEA_MAP(SMU_FEATURE_VCN_DPM_BIT,			FEATURE_DPM_VCN_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_RSMU_SMN_CG_BIT,			FEATURE_RSMU_SMN_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_WAFL_CG_BIT,			FEATURE_WAFL_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_PPT_BIT,				FEATURE_PPT_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_TDC_BIT,				FEATURE_TDC_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_APCC_PLUS_BIT,			FEATURE_APCC_PLUS_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT,			FEATURE_APCC_DFLL_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_FUSE_CG_BIT,			FEATURE_FUSE_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_MP1_CG_BIT,			FEATURE_MP1_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_SMUIO_CG_BIT,			FEATURE_SMUIO_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_THM_CG_BIT,			FEATURE_THM_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_CLK_CG_BIT,			FEATURE_CLK_CG_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_FW_CTF_BIT,			FEATURE_FW_CTF_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_THERMAL_BIT,			FEATURE_THERMAL_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_OUT_OF_BAND_MONITOR_BIT,		FEATURE_OUT_OF_BAND_MONITOR_BIT),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT,	FEATURE_XGMI_PER_LINK_PWR_DWN),
	ALDEBARAN_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT,			FEATURE_DF_CSTATE),
};
201 
/* Generic SMU table id -> Aldebaran firmware table id (identity names). */
static const struct cmn2asic_mapping aldebaran_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PPTABLE),
	TAB_MAP(AVFS_PSM_DEBUG),
	TAB_MAP(AVFS_FUSE_OVERRIDE),
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(DRIVER_SMU_CONFIG),
	TAB_MAP(I2C_COMMANDS),
	TAB_MAP(ECCINFO),
};
212 
/* ASIC throttler status bit -> generic SMU throttler bit, used when
 * translating the firmware ThrottlerStatus field for userspace.
 */
static const uint8_t aldebaran_throttler_map[] = {
	[THROTTLER_PPT0_BIT]		= (SMU_THROTTLER_PPT0_BIT),
	[THROTTLER_PPT1_BIT]		= (SMU_THROTTLER_PPT1_BIT),
	[THROTTLER_TDC_GFX_BIT]		= (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_TDC_SOC_BIT]		= (SMU_THROTTLER_TDC_SOC_BIT),
	/* HBM-specific ASIC bits fold into the generic MEM bits. */
	[THROTTLER_TDC_HBM_BIT]		= (SMU_THROTTLER_TDC_MEM_BIT),
	[THROTTLER_TEMP_GPU_BIT]	= (SMU_THROTTLER_TEMP_GPU_BIT),
	[THROTTLER_TEMP_MEM_BIT]	= (SMU_THROTTLER_TEMP_MEM_BIT),
	[THROTTLER_TEMP_VR_GFX_BIT]	= (SMU_THROTTLER_TEMP_VR_GFX_BIT),
	[THROTTLER_TEMP_VR_SOC_BIT]	= (SMU_THROTTLER_TEMP_VR_SOC_BIT),
	[THROTTLER_TEMP_VR_MEM_BIT]	= (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
	[THROTTLER_APCC_BIT]		= (SMU_THROTTLER_APCC_BIT),
};
226 
227 static int aldebaran_tables_init(struct smu_context *smu)
228 {
229 	struct smu_table_context *smu_table = &smu->smu_table;
230 	struct smu_table *tables = smu_table->tables;
231 
232 	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
233 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
234 
235 	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
236 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
237 
238 	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
239 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
240 
241 	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
242 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
243 
244 	SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
245 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
246 
247 	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
248 	if (!smu_table->metrics_table)
249 		return -ENOMEM;
250 	smu_table->metrics_time = 0;
251 
252 	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
253 	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
254 	if (!smu_table->gpu_metrics_table) {
255 		kfree(smu_table->metrics_table);
256 		return -ENOMEM;
257 	}
258 
259 	smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
260 	if (!smu_table->ecc_table) {
261 		kfree(smu_table->metrics_table);
262 		kfree(smu_table->gpu_metrics_table);
263 		return -ENOMEM;
264 	}
265 
266 	return 0;
267 }
268 
269 static int aldebaran_select_plpd_policy(struct smu_context *smu, int level)
270 {
271 	struct amdgpu_device *adev = smu->adev;
272 
273 	/* The message only works on master die and NACK will be sent
274 	 * back for other dies, only send it on master die.
275 	 */
276 	if (adev->smuio.funcs->get_socket_id(adev) ||
277 	    adev->smuio.funcs->get_die_id(adev))
278 		return 0;
279 
280 	if (level == XGMI_PLPD_DEFAULT)
281 		return smu_cmn_send_smc_msg_with_param(
282 			smu, SMU_MSG_GmiPwrDnControl, 0, NULL);
283 	else if (level == XGMI_PLPD_DISALLOW)
284 		return smu_cmn_send_smc_msg_with_param(
285 			smu, SMU_MSG_GmiPwrDnControl, 1, NULL);
286 	else
287 		return -EINVAL;
288 }
289 
290 static int aldebaran_allocate_dpm_context(struct smu_context *smu)
291 {
292 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
293 	struct smu_dpm_policy *policy;
294 
295 	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
296 				       GFP_KERNEL);
297 	if (!smu_dpm->dpm_context)
298 		return -ENOMEM;
299 	smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);
300 
301 	smu_dpm->dpm_policies =
302 		kzalloc(sizeof(struct smu_dpm_policy_ctxt), GFP_KERNEL);
303 
304 	if (!smu_dpm->dpm_policies)
305 		return -ENOMEM;
306 
307 	policy = &(smu_dpm->dpm_policies->policies[0]);
308 	policy->policy_type = PP_PM_POLICY_XGMI_PLPD;
309 	policy->level_mask = BIT(XGMI_PLPD_DISALLOW) | BIT(XGMI_PLPD_DEFAULT);
310 	policy->current_level = XGMI_PLPD_DEFAULT;
311 	policy->set_policy = aldebaran_select_plpd_policy;
312 	smu_cmn_generic_plpd_policy_desc(policy);
313 	smu_dpm->dpm_policies->policy_mask |= BIT(PP_PM_POLICY_XGMI_PLPD);
314 
315 	return 0;
316 }
317 
/*
 * aldebaran_init_smc_tables - initialize table shadows, the DPM context
 * and the common smu_v13_0 SMC tables, in that order.
 * Returns 0 on success or the first failing step's error code.
 */
static int aldebaran_init_smc_tables(struct smu_context *smu)
{
	int ret;

	ret = aldebaran_tables_init(smu);
	if (!ret)
		ret = aldebaran_allocate_dpm_context(smu);
	if (!ret)
		ret = smu_v13_0_init_smc_tables(smu);

	return ret;
}
332 
333 static int aldebaran_get_allowed_feature_mask(struct smu_context *smu,
334 					      uint32_t *feature_mask, uint32_t num)
335 {
336 	if (num > 2)
337 		return -EINVAL;
338 
339 	/* pptable will handle the features to enable */
340 	memset(feature_mask, 0xFF, sizeof(uint32_t) * num);
341 
342 	return 0;
343 }
344 
345 static int aldebaran_get_dpm_ultimate_freq(struct smu_context *smu,
346 					   enum smu_clk_type clk_type,
347 					   uint32_t *min, uint32_t *max)
348 {
349 	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
350 	struct smu_13_0_dpm_table *dpm_table;
351 	uint32_t min_clk, max_clk;
352 
353 	if (amdgpu_sriov_vf(smu->adev)) {
354 		switch (clk_type) {
355 		case SMU_MCLK:
356 		case SMU_UCLK:
357 			dpm_table = &dpm_context->dpm_tables.uclk_table;
358 			break;
359 		case SMU_GFXCLK:
360 		case SMU_SCLK:
361 			dpm_table = &dpm_context->dpm_tables.gfx_table;
362 			break;
363 		case SMU_SOCCLK:
364 			dpm_table = &dpm_context->dpm_tables.soc_table;
365 			break;
366 		case SMU_FCLK:
367 			dpm_table = &dpm_context->dpm_tables.fclk_table;
368 			break;
369 		case SMU_VCLK:
370 			dpm_table = &dpm_context->dpm_tables.vclk_table;
371 			break;
372 		case SMU_DCLK:
373 			dpm_table = &dpm_context->dpm_tables.dclk_table;
374 			break;
375 		default:
376 			return -EINVAL;
377 		}
378 
379 		min_clk = dpm_table->min;
380 		max_clk = dpm_table->max;
381 
382 		if (min) {
383 			if (!min_clk)
384 				return -ENODATA;
385 			*min = min_clk;
386 		}
387 		if (max) {
388 			if (!max_clk)
389 				return -ENODATA;
390 			*max = max_clk;
391 		}
392 
393 	} else {
394 		return smu_v13_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
395 	}
396 
397 	return 0;
398 }
399 
400 static int aldebaran_set_default_dpm_table(struct smu_context *smu)
401 {
402 	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
403 	struct smu_13_0_dpm_table *dpm_table = NULL;
404 	PPTable_t *pptable = smu->smu_table.driver_pptable;
405 	int ret = 0;
406 
407 	/* socclk dpm table setup */
408 	dpm_table = &dpm_context->dpm_tables.soc_table;
409 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
410 		ret = smu_v13_0_set_single_dpm_table(smu,
411 						     SMU_SOCCLK,
412 						     dpm_table);
413 		if (ret)
414 			return ret;
415 	} else {
416 		dpm_table->count = 1;
417 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
418 		dpm_table->dpm_levels[0].enabled = true;
419 		dpm_table->min = dpm_table->dpm_levels[0].value;
420 		dpm_table->max = dpm_table->dpm_levels[0].value;
421 	}
422 
423 	/* gfxclk dpm table setup */
424 	dpm_table = &dpm_context->dpm_tables.gfx_table;
425 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
426 		/* in the case of gfxclk, only fine-grained dpm is honored */
427 		dpm_table->count = 2;
428 		dpm_table->dpm_levels[0].value = pptable->GfxclkFmin;
429 		dpm_table->dpm_levels[0].enabled = true;
430 		dpm_table->dpm_levels[1].value = pptable->GfxclkFmax;
431 		dpm_table->dpm_levels[1].enabled = true;
432 		dpm_table->min = dpm_table->dpm_levels[0].value;
433 		dpm_table->max = dpm_table->dpm_levels[1].value;
434 	} else {
435 		dpm_table->count = 1;
436 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
437 		dpm_table->dpm_levels[0].enabled = true;
438 		dpm_table->min = dpm_table->dpm_levels[0].value;
439 		dpm_table->max = dpm_table->dpm_levels[0].value;
440 	}
441 
442 	/* memclk dpm table setup */
443 	dpm_table = &dpm_context->dpm_tables.uclk_table;
444 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
445 		ret = smu_v13_0_set_single_dpm_table(smu,
446 						     SMU_UCLK,
447 						     dpm_table);
448 		if (ret)
449 			return ret;
450 	} else {
451 		dpm_table->count = 1;
452 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
453 		dpm_table->dpm_levels[0].enabled = true;
454 		dpm_table->min = dpm_table->dpm_levels[0].value;
455 		dpm_table->max = dpm_table->dpm_levels[0].value;
456 	}
457 
458 	/* fclk dpm table setup */
459 	dpm_table = &dpm_context->dpm_tables.fclk_table;
460 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
461 		ret = smu_v13_0_set_single_dpm_table(smu,
462 						     SMU_FCLK,
463 						     dpm_table);
464 		if (ret)
465 			return ret;
466 	} else {
467 		dpm_table->count = 1;
468 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
469 		dpm_table->dpm_levels[0].enabled = true;
470 		dpm_table->min = dpm_table->dpm_levels[0].value;
471 		dpm_table->max = dpm_table->dpm_levels[0].value;
472 	}
473 
474 	return 0;
475 }
476 
477 static int aldebaran_check_powerplay_table(struct smu_context *smu)
478 {
479 	struct smu_table_context *table_context = &smu->smu_table;
480 	struct smu_13_0_powerplay_table *powerplay_table =
481 		table_context->power_play_table;
482 
483 	table_context->thermal_controller_type =
484 		powerplay_table->thermal_controller_type;
485 
486 	return 0;
487 }
488 
489 static int aldebaran_store_powerplay_table(struct smu_context *smu)
490 {
491 	struct smu_table_context *table_context = &smu->smu_table;
492 	struct smu_13_0_powerplay_table *powerplay_table =
493 		table_context->power_play_table;
494 	memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
495 	       sizeof(PPTable_t));
496 
497 	return 0;
498 }
499 
/*
 * aldebaran_append_powerplay_table - overlay VBIOS smc_dpm_info board
 * parameters on top of the cached driver pptable.
 *
 * Fetches the atom smc_dpm_info data table and, when its revision is
 * exactly 4.10, copies everything from the GfxMaxCurrent member onward
 * (excluding the trailing reserved field) into the driver pptable.
 * Other revisions are silently ignored.  Returns 0 on success or the
 * atombios lookup error.
 */
static int aldebaran_append_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	struct atom_smc_dpm_info_v4_10 *smc_dpm_table;
	int index, ret;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					   smc_dpm_info);

	/* smc_dpm_table points into the VBIOS image; no copy is made here. */
	ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
				      (uint8_t **)&smc_dpm_table);
	if (ret)
		return ret;

	dev_info(smu->adev->dev, "smc_dpm_info table revision(format.content): %d.%d\n",
			smc_dpm_table->table_header.format_revision,
			smc_dpm_table->table_header.content_revision);

	if ((smc_dpm_table->table_header.format_revision == 4) &&
	    (smc_dpm_table->table_header.content_revision == 10))
		smu_memcpy_trailing(smc_pptable, GfxMaxCurrent, reserved,
				    smc_dpm_table, GfxMaxCurrent);
	return 0;
}
525 
526 static int aldebaran_setup_pptable(struct smu_context *smu)
527 {
528 	int ret = 0;
529 
530 	/* VBIOS pptable is the first choice */
531 	smu->smu_table.boot_values.pp_table_id = 0;
532 
533 	ret = smu_v13_0_setup_pptable(smu);
534 	if (ret)
535 		return ret;
536 
537 	ret = aldebaran_store_powerplay_table(smu);
538 	if (ret)
539 		return ret;
540 
541 	ret = aldebaran_append_powerplay_table(smu);
542 	if (ret)
543 		return ret;
544 
545 	ret = aldebaran_check_powerplay_table(smu);
546 	if (ret)
547 		return ret;
548 
549 	return ret;
550 }
551 
552 static bool aldebaran_is_primary(struct smu_context *smu)
553 {
554 	struct amdgpu_device *adev = smu->adev;
555 
556 	if (adev->smuio.funcs && adev->smuio.funcs->get_die_id)
557 		return adev->smuio.funcs->get_die_id(adev) == 0;
558 
559 	return true;
560 }
561 
562 static int aldebaran_run_board_btc(struct smu_context *smu)
563 {
564 	int ret;
565 
566 	if (!aldebaran_is_primary(smu))
567 		return 0;
568 
569 	if (smu->smc_fw_version <= 0x00441d00)
570 		return 0;
571 
572 	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BoardPowerCalibration, NULL);
573 	if (ret)
574 		dev_err(smu->adev->dev, "Board power calibration failed!\n");
575 
576 	return ret;
577 }
578 
579 static int aldebaran_run_btc(struct smu_context *smu)
580 {
581 	int ret;
582 
583 	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
584 	if (ret)
585 		dev_err(smu->adev->dev, "RunDcBtc failed!\n");
586 	else
587 		ret = aldebaran_run_board_btc(smu);
588 
589 	return ret;
590 }
591 
/*
 * aldebaran_populate_umd_state_clk - seed the UMD pstate table from the
 * gfx/uclk/soc DPM tables: min/peak/current bounds come straight from
 * each table, and the "standard" level is taken from the fixed
 * ALDEBARAN_UMD_PSTATE_*_LEVEL index when every table has enough levels,
 * falling back to the minimum otherwise.  Always returns 0.
 */
static int aldebaran_populate_umd_state_clk(struct smu_context *smu)
{
	struct smu_13_0_dpm_context *dpm_context =
		smu->smu_dpm.dpm_context;
	struct smu_13_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_13_0_dpm_table *mem_table =
		&dpm_context->dpm_tables.uclk_table;
	struct smu_13_0_dpm_table *soc_table =
		&dpm_context->dpm_tables.soc_table;
	struct smu_umd_pstate_table *pstate_table =
		&smu->pstate_table;

	pstate_table->gfxclk_pstate.min = gfx_table->min;
	pstate_table->gfxclk_pstate.peak = gfx_table->max;
	pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
	pstate_table->gfxclk_pstate.curr.max = gfx_table->max;

	pstate_table->uclk_pstate.min = mem_table->min;
	pstate_table->uclk_pstate.peak = mem_table->max;
	pstate_table->uclk_pstate.curr.min = mem_table->min;
	pstate_table->uclk_pstate.curr.max = mem_table->max;

	pstate_table->socclk_pstate.min = soc_table->min;
	pstate_table->socclk_pstate.peak = soc_table->max;
	pstate_table->socclk_pstate.curr.min = soc_table->min;
	pstate_table->socclk_pstate.curr.max = soc_table->max;

	/* Use the designated pstate levels only when ALL three tables
	 * expose a level past the respective index; otherwise fall back
	 * to the minimum for all of them.
	 */
	if (gfx_table->count > ALDEBARAN_UMD_PSTATE_GFXCLK_LEVEL &&
	    mem_table->count > ALDEBARAN_UMD_PSTATE_MCLK_LEVEL &&
	    soc_table->count > ALDEBARAN_UMD_PSTATE_SOCCLK_LEVEL) {
		pstate_table->gfxclk_pstate.standard =
			gfx_table->dpm_levels[ALDEBARAN_UMD_PSTATE_GFXCLK_LEVEL].value;
		pstate_table->uclk_pstate.standard =
			mem_table->dpm_levels[ALDEBARAN_UMD_PSTATE_MCLK_LEVEL].value;
		pstate_table->socclk_pstate.standard =
			soc_table->dpm_levels[ALDEBARAN_UMD_PSTATE_SOCCLK_LEVEL].value;
	} else {
		pstate_table->gfxclk_pstate.standard =
			pstate_table->gfxclk_pstate.min;
		pstate_table->uclk_pstate.standard =
			pstate_table->uclk_pstate.min;
		pstate_table->socclk_pstate.standard =
			pstate_table->socclk_pstate.min;
	}

	return 0;
}
640 
641 static void aldebaran_get_clk_table(struct smu_context *smu,
642 				    struct pp_clock_levels_with_latency *clocks,
643 				    struct smu_13_0_dpm_table *dpm_table)
644 {
645 	uint32_t i;
646 
647 	clocks->num_levels = min_t(uint32_t,
648 				   dpm_table->count,
649 				   (uint32_t)PP_MAX_CLOCK_LEVELS);
650 
651 	for (i = 0; i < clocks->num_levels; i++) {
652 		clocks->data[i].clocks_in_khz =
653 			dpm_table->dpm_levels[i].value * 1000;
654 		clocks->data[i].latency_in_us = 0;
655 	}
656 
657 }
658 
659 static int aldebaran_freqs_in_same_level(int32_t frequency1,
660 					 int32_t frequency2)
661 {
662 	return (abs(frequency1 - frequency2) <= EPSILON);
663 }
664 
/*
 * aldebaran_get_smu_metrics_data - read one field out of the (cached)
 * firmware metrics table.
 *
 * @member: which metric to fetch.
 * @value:  output; set to UINT_MAX for unknown members (still returns 0).
 *
 * Returns 0 on success, the metrics-table fetch error, or -EOPNOTSUPP
 * for socket power queried on a secondary die.
 */
static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
					  MetricsMember_t member,
					  uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
	int ret = 0;

	/* Refresh the cached metrics table if it has gone stale
	 * (bypass_cache = false lets smu_cmn apply its cache policy).
	 */
	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK];
		break;
	case METRICS_CURR_FCLK:
		*value = metrics->CurrClock[PPCLK_FCLK];
		break;
	case METRICS_AVERAGE_GFXCLK:
		*value = metrics->AverageGfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->AverageSocclkFrequency;
		break;
	case METRICS_AVERAGE_UCLK:
		*value = metrics->AverageUclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* Valid power data is available only from primary die */
		/* << 8 converts to the caller's fixed-point format
		 * (NOTE(review): presumably Q8 watts — confirm with consumers).
		 */
		if (aldebaran_is_primary(smu))
			*value = metrics->AverageSocketPower << 8;
		else
			ret = -EOPNOTSUPP;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->TemperatureEdge *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->TemperatureHotspot *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->TemperatureHBM *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->TemperatureVrGfx *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->TemperatureVrSoc *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRMEM:
		*value = metrics->TemperatureVrMem *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->ThrottlerStatus;
		break;
	case METRICS_UNIQUE_ID_UPPER32:
		*value = metrics->PublicSerialNumUpper32;
		break;
	case METRICS_UNIQUE_ID_LOWER32:
		*value = metrics->PublicSerialNumLower32;
		break;
	default:
		/* Unknown member: report a sentinel but do not fail. */
		*value = UINT_MAX;
		break;
	}

	return ret;
}
760 
/*
 * aldebaran_get_current_clk_freq_by_table - current frequency for
 * @clk_type read from the metrics table.
 *
 * Picks CurrClock when the matching DPM feature is enabled (accurate
 * only then) and falls back to the Average* metric otherwise; FCLK has
 * no average counterpart.  Returns 0 on success, -EINVAL for a NULL
 * @value or unmapped clock, or the metrics fetch error.
 */
static int aldebaran_get_current_clk_freq_by_table(struct smu_context *smu,
						   enum smu_clk_type clk_type,
						   uint32_t *value)
{
	MetricsMember_t member_type;
	int clk_id = 0;

	if (!value)
		return -EINVAL;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return -EINVAL;

	switch (clk_id) {
	case PPCLK_GFXCLK:
		/*
		 * CurrClock[clk_id] can provide accurate
		 *   output only when the dpm feature is enabled.
		 * We can use Average_* for dpm disabled case.
		 *   But this is available for gfxclk/uclk/socclk/vclk/dclk.
		 */
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT))
			member_type = METRICS_CURR_GFXCLK;
		else
			member_type = METRICS_AVERAGE_GFXCLK;
		break;
	case PPCLK_UCLK:
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
			member_type = METRICS_CURR_UCLK;
		else
			member_type = METRICS_AVERAGE_UCLK;
		break;
	case PPCLK_SOCCLK:
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT))
			member_type = METRICS_CURR_SOCCLK;
		else
			member_type = METRICS_AVERAGE_SOCCLK;
		break;
	case PPCLK_VCLK:
		/* vclk/dclk are gated on the VCN PG feature here, not a
		 * DPM bit (NOTE(review): intentional? confirm against
		 * the feature map which has no VCN_PG entry).
		 */
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
			member_type = METRICS_CURR_VCLK;
		else
			member_type = METRICS_AVERAGE_VCLK;
		break;
	case PPCLK_DCLK:
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
			member_type = METRICS_CURR_DCLK;
		else
			member_type = METRICS_AVERAGE_DCLK;
		break;
	case PPCLK_FCLK:
		member_type = METRICS_CURR_FCLK;
		break;
	default:
		return -EINVAL;
	}

	return aldebaran_get_smu_metrics_data(smu,
					      member_type,
					      value);
}
825 
/*
 * aldebaran_emit_clk_levels - emit the DPM level table for one clock domain
 * into a sysfs buffer.
 *
 * @smu:    SMU context
 * @type:   clock domain (SMU_SCLK, SMU_MCLK, ..., or the OD_* variants)
 * @buf:    sysfs output buffer
 * @offset: in/out byte offset into @buf; advanced by every emit
 *
 * The OD_* cases only print the currently programmed min/max pair and
 * return immediately.  All other cases first gather the current frequency
 * and the per-domain DPM table, then print each level, marking with '*'
 * the level(s) matching the current frequency.
 *
 * Returns 0 on success, -EBUSY after a fatal RAS interrupt, -EINVAL for an
 * unsupported clock type, or the error from the frequency query.
 */
static int aldebaran_emit_clk_levels(struct smu_context *smu,
				     enum smu_clk_type type, char *buf, int *offset)
{
	int ret = 0;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
	struct pp_clock_levels_with_latency clocks;
	struct smu_13_0_dpm_table *single_dpm_table;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_13_0_dpm_context *dpm_context = NULL;
	uint32_t i;
	int display_levels;
	uint32_t freq_values[3] = {0};
	uint32_t min_clk, max_clk, cur_value = 0;
	bool freq_match;
	unsigned int clock_mhz;
	static const char attempt_string[] = "Attempt to get current";

	/* SMU is unreachable after a fatal RAS interrupt. */
	if (amdgpu_ras_intr_triggered()) {
		*offset += sysfs_emit_at(buf, *offset, "unavailable\n");
		return -EBUSY;
	}

	dpm_context = smu_dpm->dpm_context;

	/* First pass: fetch current frequency and the domain's DPM table. */
	switch (type) {

	case SMU_OD_SCLK:
		*offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_SCLK");
		*offset += sysfs_emit_at(buf, *offset, "0: %uMhz\n1: %uMhz\n",
				      pstate_table->gfxclk_pstate.curr.min,
				      pstate_table->gfxclk_pstate.curr.max);
		return 0;
	case SMU_SCLK:
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &cur_value);
		if (ret) {
			dev_err(smu->adev->dev, "%s gfx clk Failed!", attempt_string);
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		aldebaran_get_clk_table(smu, &clocks, single_dpm_table);

		display_levels = (clocks.num_levels == 1) ? 1 : 2;

		min_clk = pstate_table->gfxclk_pstate.curr.min;
		max_clk = pstate_table->gfxclk_pstate.curr.max;

		freq_values[0] = min_clk;
		freq_values[1] = max_clk;

		/* fine-grained dpm has only 2 levels */
		if (cur_value > min_clk && cur_value < max_clk) {
			/* Current freq sits strictly between min and max:
			 * show it as a synthetic middle level. */
			display_levels++;
			freq_values[2] = max_clk;
			freq_values[1] = cur_value;
		}
		break;

	case SMU_OD_MCLK:
		*offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_MCLK");
		*offset += sysfs_emit_at(buf, *offset, "0: %uMhz\n1: %uMhz\n",
				      pstate_table->uclk_pstate.curr.min,
				      pstate_table->uclk_pstate.curr.max);
		return 0;
	case SMU_MCLK:
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &cur_value);
		if (ret) {
			dev_err(smu->adev->dev, "%s mclk Failed!", attempt_string);
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
		break;

	case SMU_SOCCLK:
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_SOCCLK, &cur_value);
		if (ret) {
			dev_err(smu->adev->dev, "%s socclk Failed!", attempt_string);
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
		break;

	case SMU_FCLK:
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_FCLK, &cur_value);
		if (ret) {
			dev_err(smu->adev->dev, "%s fclk Failed!", attempt_string);
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
		break;

	case SMU_VCLK:
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_VCLK, &cur_value);
		if (ret) {
			dev_err(smu->adev->dev, "%s vclk Failed!", attempt_string);
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
		break;

	case SMU_DCLK:
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_DCLK, &cur_value);
		if (ret) {
			dev_err(smu->adev->dev, "%s dclk Failed!", attempt_string);
			return ret;
		}

		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
		break;

	default:
		return -EINVAL;
	}

	/* Second pass: print the level list gathered above. */
	switch (type) {
	case SMU_SCLK:
		/* SCLK prints the synthetic 1-3 entry freq_values[] list. */
		for (i = 0; i < display_levels; i++) {
			clock_mhz = freq_values[i];
			freq_match = aldebaran_freqs_in_same_level(clock_mhz, cur_value);
			freq_match |= (display_levels == 1);

			*offset += sysfs_emit_at(buf, *offset, "%d: %uMhz %s\n", i,
				clock_mhz,
				(freq_match) ? "*" : "");
		}
		break;

	case SMU_MCLK:
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_DCLK:
		/* Other domains print the full DPM table (kHz -> MHz). */
		for (i = 0; i < clocks.num_levels; i++) {
			clock_mhz = clocks.data[i].clocks_in_khz / 1000;
			freq_match = aldebaran_freqs_in_same_level(clock_mhz, cur_value);
			freq_match |= (clocks.num_levels == 1);

			*offset += sysfs_emit_at(buf, *offset, "%d: %uMhz %s\n",
				i, clock_mhz,
				(freq_match) ? "*" : "");
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
983 
984 static int aldebaran_upload_dpm_level(struct smu_context *smu,
985 				      bool max,
986 				      uint32_t feature_mask,
987 				      uint32_t level)
988 {
989 	struct smu_13_0_dpm_context *dpm_context =
990 		smu->smu_dpm.dpm_context;
991 	uint32_t freq;
992 	int ret = 0;
993 
994 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
995 	    (feature_mask & FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT))) {
996 		freq = dpm_context->dpm_tables.gfx_table.dpm_levels[level].value;
997 		ret = smu_cmn_send_smc_msg_with_param(smu,
998 						      (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
999 						      (PPCLK_GFXCLK << 16) | (freq & 0xffff),
1000 						      NULL);
1001 		if (ret) {
1002 			dev_err(smu->adev->dev, "Failed to set soft %s gfxclk !\n",
1003 				max ? "max" : "min");
1004 			return ret;
1005 		}
1006 	}
1007 
1008 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
1009 	    (feature_mask & FEATURE_MASK(FEATURE_DPM_UCLK_BIT))) {
1010 		freq = dpm_context->dpm_tables.uclk_table.dpm_levels[level].value;
1011 		ret = smu_cmn_send_smc_msg_with_param(smu,
1012 						      (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
1013 						      (PPCLK_UCLK << 16) | (freq & 0xffff),
1014 						      NULL);
1015 		if (ret) {
1016 			dev_err(smu->adev->dev, "Failed to set soft %s memclk !\n",
1017 				max ? "max" : "min");
1018 			return ret;
1019 		}
1020 	}
1021 
1022 	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
1023 	    (feature_mask & FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT))) {
1024 		freq = dpm_context->dpm_tables.soc_table.dpm_levels[level].value;
1025 		ret = smu_cmn_send_smc_msg_with_param(smu,
1026 						      (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
1027 						      (PPCLK_SOCCLK << 16) | (freq & 0xffff),
1028 						      NULL);
1029 		if (ret) {
1030 			dev_err(smu->adev->dev, "Failed to set soft %s socclk !\n",
1031 				max ? "max" : "min");
1032 			return ret;
1033 		}
1034 	}
1035 
1036 	return ret;
1037 }
1038 
1039 static int aldebaran_force_clk_levels(struct smu_context *smu,
1040 				      enum smu_clk_type type, uint32_t mask)
1041 {
1042 	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
1043 	struct smu_13_0_dpm_table *single_dpm_table = NULL;
1044 	uint32_t soft_min_level, soft_max_level;
1045 	int ret = 0;
1046 
1047 	soft_min_level = mask ? (ffs(mask) - 1) : 0;
1048 	soft_max_level = mask ? (fls(mask) - 1) : 0;
1049 
1050 	switch (type) {
1051 	case SMU_SCLK:
1052 		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
1053 		if (soft_max_level >= single_dpm_table->count) {
1054 			dev_err(smu->adev->dev, "Clock level specified %d is over max allowed %d\n",
1055 				soft_max_level, single_dpm_table->count - 1);
1056 			ret = -EINVAL;
1057 			break;
1058 		}
1059 
1060 		ret = aldebaran_upload_dpm_level(smu,
1061 						 false,
1062 						 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT),
1063 						 soft_min_level);
1064 		if (ret) {
1065 			dev_err(smu->adev->dev, "Failed to upload boot level to lowest!\n");
1066 			break;
1067 		}
1068 
1069 		ret = aldebaran_upload_dpm_level(smu,
1070 						 true,
1071 						 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT),
1072 						 soft_max_level);
1073 		if (ret)
1074 			dev_err(smu->adev->dev, "Failed to upload dpm max level to highest!\n");
1075 
1076 		break;
1077 
1078 	case SMU_MCLK:
1079 	case SMU_SOCCLK:
1080 	case SMU_FCLK:
1081 		/*
1082 		 * Should not arrive here since aldebaran does not
1083 		 * support mclk/socclk/fclk softmin/softmax settings
1084 		 */
1085 		ret = -EINVAL;
1086 		break;
1087 
1088 	default:
1089 		break;
1090 	}
1091 
1092 	return ret;
1093 }
1094 
1095 static int aldebaran_get_thermal_temperature_range(struct smu_context *smu,
1096 						   struct smu_temperature_range *range)
1097 {
1098 	struct smu_table_context *table_context = &smu->smu_table;
1099 	struct smu_13_0_powerplay_table *powerplay_table =
1100 		table_context->power_play_table;
1101 	PPTable_t *pptable = smu->smu_table.driver_pptable;
1102 
1103 	if (!range)
1104 		return -EINVAL;
1105 
1106 	memcpy(range, &smu13_thermal_policy[0], sizeof(struct smu_temperature_range));
1107 
1108 	range->hotspot_crit_max = pptable->ThotspotLimit *
1109 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1110 	range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
1111 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1112 	range->mem_crit_max = pptable->TmemLimit *
1113 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1114 	range->mem_emergency_max = (pptable->TmemLimit + CTF_OFFSET_MEM)*
1115 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1116 	range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
1117 
1118 	return 0;
1119 }
1120 
1121 static int aldebaran_get_current_activity_percent(struct smu_context *smu,
1122 						  enum amd_pp_sensors sensor,
1123 						  uint32_t *value)
1124 {
1125 	int ret = 0;
1126 
1127 	if (!value)
1128 		return -EINVAL;
1129 
1130 	switch (sensor) {
1131 	case AMDGPU_PP_SENSOR_GPU_LOAD:
1132 		ret = aldebaran_get_smu_metrics_data(smu,
1133 						     METRICS_AVERAGE_GFXACTIVITY,
1134 						     value);
1135 		break;
1136 	case AMDGPU_PP_SENSOR_MEM_LOAD:
1137 		ret = aldebaran_get_smu_metrics_data(smu,
1138 						     METRICS_AVERAGE_MEMACTIVITY,
1139 						     value);
1140 		break;
1141 	default:
1142 		dev_err(smu->adev->dev, "Invalid sensor for retrieving clock activity\n");
1143 		return -EINVAL;
1144 	}
1145 
1146 	return ret;
1147 }
1148 
1149 static int aldebaran_thermal_get_temperature(struct smu_context *smu,
1150 					     enum amd_pp_sensors sensor,
1151 					     uint32_t *value)
1152 {
1153 	int ret = 0;
1154 
1155 	if (!value)
1156 		return -EINVAL;
1157 
1158 	switch (sensor) {
1159 	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
1160 		ret = aldebaran_get_smu_metrics_data(smu,
1161 						     METRICS_TEMPERATURE_HOTSPOT,
1162 						     value);
1163 		break;
1164 	case AMDGPU_PP_SENSOR_EDGE_TEMP:
1165 		ret = aldebaran_get_smu_metrics_data(smu,
1166 						     METRICS_TEMPERATURE_EDGE,
1167 						     value);
1168 		break;
1169 	case AMDGPU_PP_SENSOR_MEM_TEMP:
1170 		ret = aldebaran_get_smu_metrics_data(smu,
1171 						     METRICS_TEMPERATURE_MEM,
1172 						     value);
1173 		break;
1174 	default:
1175 		dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n");
1176 		return -EINVAL;
1177 	}
1178 
1179 	return ret;
1180 }
1181 
/*
 * aldebaran_read_sensor - generic sensor entry point for the pm layer.
 *
 * @smu:    SMU context
 * @sensor: which sensor to read
 * @data:   destination buffer (all supported sensors write 4 bytes)
 * @size:   out: number of bytes written to @data
 *
 * Returns 0 on success, -EINVAL on NULL arguments, -EOPNOTSUPP for
 * unsupported sensors, or the underlying read error.
 */
static int aldebaran_read_sensor(struct smu_context *smu,
				 enum amd_pp_sensors sensor,
				 void *data, uint32_t *size)
{
	int ret = 0;

	/* After a fatal RAS interrupt the SMU is unreachable; return
	 * success without writing *data or *size. */
	if (amdgpu_ras_intr_triggered())
		return 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_MEM_LOAD:
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = aldebaran_get_current_activity_percent(smu,
							     sensor,
							     (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
		ret = aldebaran_get_smu_metrics_data(smu,
						     METRICS_AVERAGE_SOCKETPOWER,
						     (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = aldebaran_thermal_get_temperature(smu, sensor,
							(uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data);
		/* the output clock frequency in 10K unit */
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data);
		/* MHz -> 10 kHz units, same scaling as MCLK above */
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
	/* fall through: input power is deliberately unsupported here */
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
1238 
/*
 * aldebaran_get_power_limit - report current/default/min/max power limits.
 *
 * All out-pointers are optional.  When the PPT feature is disabled every
 * provided limit is reported as 0.  Valid power data exists only on the
 * primary die; secondary dies report 0 for current/default.  The current
 * limit is queried from firmware, falling back to the static pptable
 * value if the query fails.
 *
 * Returns 0 on success, -EINVAL only when the firmware query fails and no
 * pptable is available as a fallback.
 */
static int aldebaran_get_power_limit(struct smu_context *smu,
						uint32_t *current_power_limit,
						uint32_t *default_power_limit,
						uint32_t *max_power_limit,
						uint32_t *min_power_limit)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	uint32_t power_limit = 0;
	int ret;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		if (current_power_limit)
			*current_power_limit = 0;
		if (default_power_limit)
			*default_power_limit = 0;
		if (max_power_limit)
			*max_power_limit = 0;
		if (min_power_limit)
			*min_power_limit = 0;
		dev_warn(smu->adev->dev,
			"PPT feature is not enabled, power values can't be fetched.");

		return 0;
	}

	/* Valid power data is available only from primary die.
	 * For secondary die show the value as 0.
	 */
	if (aldebaran_is_primary(smu)) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit,
					   &power_limit);

		if (ret) {
			/* the last hope to figure out the ppt limit */
			if (!pptable) {
				dev_err(smu->adev->dev,
					"Cannot get PPT limit due to pptable missing!");
				return -EINVAL;
			}
			power_limit = pptable->PptLimit;
		}
	}

	if (current_power_limit)
		*current_power_limit = power_limit;
	if (default_power_limit)
		*default_power_limit = power_limit;

	if (max_power_limit) {
		/* NOTE(review): *max_power_limit is left untouched when
		 * pptable is NULL — callers presumably pre-initialize it;
		 * confirm against callers. */
		if (pptable)
			*max_power_limit = pptable->PptLimit;
	}

	if (min_power_limit)
		*min_power_limit = 0;

	return 0;
}
1297 
1298 static int aldebaran_set_power_limit(struct smu_context *smu,
1299 				     enum smu_ppt_limit_type limit_type,
1300 				     uint32_t limit)
1301 {
1302 	/* Power limit can be set only through primary die */
1303 	if (aldebaran_is_primary(smu))
1304 		return smu_v13_0_set_power_limit(smu, limit_type, limit);
1305 
1306 	return -EINVAL;
1307 }
1308 
1309 static int aldebaran_system_features_control(struct  smu_context *smu, bool enable)
1310 {
1311 	int ret;
1312 
1313 	ret = smu_v13_0_system_features_control(smu, enable);
1314 	if (!ret && enable)
1315 		ret = aldebaran_run_btc(smu);
1316 
1317 	return ret;
1318 }
1319 
1320 static int aldebaran_set_performance_level(struct smu_context *smu,
1321 					   enum amd_dpm_forced_level level)
1322 {
1323 	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
1324 	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
1325 	struct smu_13_0_dpm_table *gfx_table =
1326 		&dpm_context->dpm_tables.gfx_table;
1327 	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
1328 	int r;
1329 
1330 	/* Disable determinism if switching to another mode */
1331 	if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
1332 	    (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) {
1333 		smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
1334 		pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
1335 	}
1336 
1337 	switch (level) {
1338 
1339 	case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
1340 		return 0;
1341 	case AMD_DPM_FORCED_LEVEL_AUTO:
1342 		r = smu_v13_0_set_performance_level(smu, level);
1343 		if (!r)
1344 			smu_v13_0_reset_custom_level(smu);
1345 		return r;
1346 	case AMD_DPM_FORCED_LEVEL_HIGH:
1347 	case AMD_DPM_FORCED_LEVEL_LOW:
1348 	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1349 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1350 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1351 	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1352 	default:
1353 		break;
1354 	}
1355 
1356 	return smu_v13_0_set_performance_level(smu, level);
1357 }
1358 
/*
 * aldebaran_set_soft_freq_limited_range - set the soft gfx clock window.
 *
 * Only SMU_GFXCLK/SMU_SCLK are accepted, and only in MANUAL or
 * PERF_DETERMINISM mode.  In MANUAL mode the [min, max] pair is applied
 * directly.  In determinism mode @min is ignored: the default table range
 * is restored first, then determinism is enabled at @max.
 *
 * NOTE(review): the @automatic parameter is unused here; "false" is always
 * passed down — presumably intentional for this ASIC, confirm with the
 * common smu_v13_0 implementation.
 *
 * Returns 0 on success or a negative error code.
 */
static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
						 enum smu_clk_type clk_type,
						 uint32_t min,
						 uint32_t max,
						 bool automatic)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
	struct amdgpu_device *adev = smu->adev;
	uint32_t min_clk;
	uint32_t max_clk;
	int ret = 0;

	if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK)
		return -EINVAL;

	if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
			&& (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
		return -EINVAL;

	if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		if (min >= max) {
			dev_err(smu->adev->dev,
				"Minimum GFX clk should be less than the maximum allowed clock\n");
			return -EINVAL;
		}

		/* Already at the requested window: nothing to do. */
		if ((min == pstate_table->gfxclk_pstate.curr.min) &&
		    (max == pstate_table->gfxclk_pstate.curr.max))
			return 0;

		ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK,
							    min, max, false);
		if (!ret) {
			/* Cache the programmed window only on success. */
			pstate_table->gfxclk_pstate.curr.min = min;
			pstate_table->gfxclk_pstate.curr.max = max;
		}

		return ret;
	}

	if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
		/* The determinism target must lie inside the table range. */
		if (!max || (max < dpm_context->dpm_tables.gfx_table.min) ||
			(max > dpm_context->dpm_tables.gfx_table.max)) {
			dev_warn(adev->dev,
					"Invalid max frequency %d MHz specified for determinism\n", max);
			return -EINVAL;
		}

		/* Restore default min/max clocks and enable determinism */
		min_clk = dpm_context->dpm_tables.gfx_table.min;
		max_clk = dpm_context->dpm_tables.gfx_table.max;
		ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false);
		if (!ret) {
			/* Short settle delay before enabling determinism. */
			usleep_range(500, 1000);
			ret = smu_cmn_send_smc_msg_with_param(smu,
					SMU_MSG_EnableDeterminism,
					max, NULL);
			if (ret) {
				dev_err(adev->dev,
						"Failed to enable determinism at GFX clock %d MHz\n", max);
			} else {
				pstate_table->gfxclk_pstate.curr.min = min_clk;
				pstate_table->gfxclk_pstate.curr.max = max;
			}
		}
	}

	return ret;
}
1430 
/*
 * aldebaran_usr_edit_dpm_table - handle OD (overdrive) sysfs commands for
 * the gfx clock.
 *
 * @type:  EDIT stages a custom min (input[0]==0) or max (input[0]==1),
 *         RESTORE reapplies the default table range, COMMIT applies the
 *         staged custom window (missing halves default to the current
 *         values).
 * @input: command arguments; layout depends on @type
 * @size:  number of entries in @input
 *
 * Only allowed in MANUAL or PERF_DETERMINISM mode.  Returns 0 on success,
 * -EINVAL on bad arguments or out-of-range clocks, -ENOSYS for unknown
 * commands, or the error from applying the range.
 */
static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
							long input[], uint32_t size)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
	uint32_t min_clk;
	uint32_t max_clk;
	int ret = 0;

	/* Only allowed in manual or determinism mode */
	if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
			&& (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
		return -EINVAL;

	switch (type) {
	case PP_OD_EDIT_SCLK_VDDC_TABLE:
		if (size != 2) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		}

		if (input[0] == 0) {
			if (input[1] < dpm_context->dpm_tables.gfx_table.min) {
				dev_warn(smu->adev->dev, "Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n",
					input[1], dpm_context->dpm_tables.gfx_table.min);
				/* On rejection, reset the staged min to the
				 * currently applied value. */
				pstate_table->gfxclk_pstate.custom.min =
					pstate_table->gfxclk_pstate.curr.min;
				return -EINVAL;
			}

			pstate_table->gfxclk_pstate.custom.min = input[1];
		} else if (input[0] == 1) {
			if (input[1] > dpm_context->dpm_tables.gfx_table.max) {
				dev_warn(smu->adev->dev, "Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
					input[1], dpm_context->dpm_tables.gfx_table.max);
				/* On rejection, reset the staged max to the
				 * currently applied value. */
				pstate_table->gfxclk_pstate.custom.max =
					pstate_table->gfxclk_pstate.curr.max;
				return -EINVAL;
			}

			pstate_table->gfxclk_pstate.custom.max = input[1];
		} else {
			return -EINVAL;
		}
		break;
	case PP_OD_RESTORE_DEFAULT_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		} else {
			/* Use the default frequencies for manual and determinism mode */
			min_clk = dpm_context->dpm_tables.gfx_table.min;
			max_clk = dpm_context->dpm_tables.gfx_table.max;

			ret = aldebaran_set_soft_freq_limited_range(
				smu, SMU_GFXCLK, min_clk, max_clk, false);
			if (ret)
				return ret;
			smu_v13_0_reset_custom_level(smu);
		}
		break;
	case PP_OD_COMMIT_DPM_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		} else {
			/* Unset halves of the custom window fall back to the
			 * currently applied values. */
			if (!pstate_table->gfxclk_pstate.custom.min)
				pstate_table->gfxclk_pstate.custom.min =
					pstate_table->gfxclk_pstate.curr.min;

			if (!pstate_table->gfxclk_pstate.custom.max)
				pstate_table->gfxclk_pstate.custom.max =
					pstate_table->gfxclk_pstate.curr.max;

			min_clk = pstate_table->gfxclk_pstate.custom.min;
			max_clk = pstate_table->gfxclk_pstate.custom.max;

			return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false);
		}
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}
1518 
1519 static bool aldebaran_is_dpm_running(struct smu_context *smu)
1520 {
1521 	int ret;
1522 	uint64_t feature_enabled;
1523 
1524 	ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
1525 	if (ret)
1526 		return false;
1527 	return !!(feature_enabled & SMC_DPM_FEATURE);
1528 }
1529 
/*
 * aldebaran_i2c_xfer - execute an i2c transaction through SMU firmware.
 *
 * Flattens all @msg segments into a single SwI2cRequest_t command list,
 * sends it via the driver table, then copies read data back out of the
 * table's shared buffer.  The adapter quirks guarantee the total byte
 * count fits MAX_SW_I2C_COMMANDS.
 *
 * Returns @num_msgs on success or a negative error code.
 */
static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msg, int num_msgs)
{
	struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
	struct amdgpu_device *adev = smu_i2c->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = &smu_table->driver_table;
	/* res aliases the driver table's CPU mapping: after the update
	 * round-trip it holds the firmware's response. */
	SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
	int i, j, r, c;
	u16 dir;

	if (!adev->pm.dpm_enabled)
		return -EBUSY;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Address is taken from the first message only; the quirks limit
	 * combined transactions to a single slave address. */
	req->I2CcontrollerPort = smu_i2c->port;
	req->I2CSpeed = I2C_SPEED_FAST_400K;
	req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
	dir = msg[0].flags & I2C_M_RD;

	/* Build one SwI2cCmd per byte across all messages; c is the flat
	 * command index. */
	for (c = i = 0; i < num_msgs; i++) {
		for (j = 0; j < msg[i].len; j++, c++) {
			SwI2cCmd_t *cmd = &req->SwI2cCmds[c];

			if (!(msg[i].flags & I2C_M_RD)) {
				/* write */
				cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
				cmd->ReadWriteData = msg[i].buf[j];
			}

			if ((dir ^ msg[i].flags) & I2C_M_RD) {
				/* The direction changes.
				 */
				dir = msg[i].flags & I2C_M_RD;
				cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
			}

			req->NumCmds++;

			/*
			 * Insert STOP if we are at the last byte of either last
			 * message for the transaction or the client explicitly
			 * requires a STOP at this particular message.
			 */
			if ((j == msg[i].len - 1) &&
			    ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
				cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
				cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
			}
		}
	}
	mutex_lock(&adev->pm.mutex);
	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
	if (r)
		goto fail;

	/* Copy read data back; write-only messages just advance the flat
	 * command index. */
	for (c = i = 0; i < num_msgs; i++) {
		if (!(msg[i].flags & I2C_M_RD)) {
			c += msg[i].len;
			continue;
		}
		for (j = 0; j < msg[i].len; j++, c++) {
			SwI2cCmd_t *cmd = &res->SwI2cCmds[c];

			msg[i].buf[j] = cmd->ReadWriteData;
		}
	}
	r = num_msgs;
fail:
	mutex_unlock(&adev->pm.mutex);
	kfree(req);
	return r;
}
1607 
/* Report plain I2C plus emulated-SMBus capability for this adapter. */
static u32 aldebaran_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
1612 
1613 
1614 static const struct i2c_algorithm aldebaran_i2c_algo = {
1615 	.master_xfer = aldebaran_i2c_xfer,
1616 	.functionality = aldebaran_i2c_func,
1617 };
1618 
/* Transfer-size limits matching the SwI2cRequest_t command buffer; combined
 * transactions must target a single address and use non-zero lengths. */
static const struct i2c_adapter_quirks aldebaran_i2c_control_quirks = {
	.flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
	.max_read_len  = MAX_SW_I2C_COMMANDS,
	.max_write_len = MAX_SW_I2C_COMMANDS,
	.max_comb_1st_msg_len = 2,
	.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
};
1626 
1627 static int aldebaran_i2c_control_init(struct smu_context *smu)
1628 {
1629 	struct amdgpu_device *adev = smu->adev;
1630 	struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[0];
1631 	struct i2c_adapter *control = &smu_i2c->adapter;
1632 	int res;
1633 
1634 	smu_i2c->adev = adev;
1635 	smu_i2c->port = 0;
1636 	mutex_init(&smu_i2c->mutex);
1637 	control->owner = THIS_MODULE;
1638 	control->dev.parent = &adev->pdev->dev;
1639 	control->algo = &aldebaran_i2c_algo;
1640 	snprintf(control->name, sizeof(control->name), "AMDGPU SMU 0");
1641 	control->quirks = &aldebaran_i2c_control_quirks;
1642 	i2c_set_adapdata(control, smu_i2c);
1643 
1644 	res = i2c_add_adapter(control);
1645 	if (res) {
1646 		DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
1647 		goto Out_err;
1648 	}
1649 
1650 	adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
1651 	adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
1652 
1653 	return 0;
1654 Out_err:
1655 	i2c_del_adapter(control);
1656 
1657 	return res;
1658 }
1659 
1660 static void aldebaran_i2c_control_fini(struct smu_context *smu)
1661 {
1662 	struct amdgpu_device *adev = smu->adev;
1663 	int i;
1664 
1665 	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
1666 		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
1667 		struct i2c_adapter *control = &smu_i2c->adapter;
1668 
1669 		i2c_del_adapter(control);
1670 	}
1671 	adev->pm.ras_eeprom_i2c_bus = NULL;
1672 	adev->pm.fru_eeprom_i2c_bus = NULL;
1673 }
1674 
1675 static void aldebaran_get_unique_id(struct smu_context *smu)
1676 {
1677 	struct amdgpu_device *adev = smu->adev;
1678 	uint32_t upper32 = 0, lower32 = 0;
1679 
1680 	if (aldebaran_get_smu_metrics_data(smu, METRICS_UNIQUE_ID_UPPER32, &upper32))
1681 		goto out;
1682 	if (aldebaran_get_smu_metrics_data(smu, METRICS_UNIQUE_ID_LOWER32, &lower32))
1683 		goto out;
1684 
1685 out:
1686 	adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
1687 }
1688 
static int aldebaran_get_bamaco_support(struct smu_context *smu)
{
	/* Aldebaran does not support BACO/BAMACO: report 0 (no support). */

	return 0;
}
1695 
1696 static int aldebaran_set_df_cstate(struct smu_context *smu,
1697 				   enum pp_df_cstate state)
1698 {
1699 	struct amdgpu_device *adev = smu->adev;
1700 
1701 	/*
1702 	 * Aldebaran does not need the cstate disablement
1703 	 * prerequisite for gpu reset.
1704 	 */
1705 	if (amdgpu_in_reset(adev) || adev->in_suspend)
1706 		return 0;
1707 
1708 	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
1709 }
1710 
/* Map of throttler status bits to human-readable names used when logging
 * thermal throttling events. */
static const struct throttling_logging_label {
	uint32_t feature_mask;
	const char *label;
} logging_label[] = {
	{(1U << THROTTLER_TEMP_GPU_BIT), "GPU"},
	{(1U << THROTTLER_TEMP_MEM_BIT), "HBM"},
	{(1U << THROTTLER_TEMP_VR_GFX_BIT), "VR of GFX rail"},
	{(1U << THROTTLER_TEMP_VR_MEM_BIT), "VR of HBM rail"},
	{(1U << THROTTLER_TEMP_VR_SOC_BIT), "VR of SOC rail"},
};
/*
 * aldebaran_log_thermal_throttling_event - log which throttlers are active
 * and forward the event to KFD's SMI interface.
 *
 * Reads the throttler status from the metrics table, builds an "A and B"
 * style list of active throttler names, warns via dmesg, and notifies KFD.
 * Silently returns if the status cannot be read.
 */
static void aldebaran_log_thermal_throttling_event(struct smu_context *smu)
{
	int ret;
	int throttler_idx, throttling_events = 0, buf_idx = 0;
	struct amdgpu_device *adev = smu->adev;
	uint32_t throttler_status;
	char log_buf[256];

	ret = aldebaran_get_smu_metrics_data(smu,
					     METRICS_THROTTLER_STATUS,
					     &throttler_status);
	if (ret)
		return;

	memset(log_buf, 0, sizeof(log_buf));
	for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label);
	     throttler_idx++) {
		if (throttler_status & logging_label[throttler_idx].feature_mask) {
			throttling_events++;
			/* snprintf returns the would-be length, so buf_idx
			 * can reach/exceed the buffer size on truncation. */
			buf_idx += snprintf(log_buf + buf_idx,
					    sizeof(log_buf) - buf_idx,
					    "%s%s",
					    throttling_events > 1 ? " and " : "",
					    logging_label[throttler_idx].label);
			if (buf_idx >= sizeof(log_buf)) {
				dev_err(adev->dev, "buffer overflow!\n");
				log_buf[sizeof(log_buf) - 1] = '\0';
				break;
			}
		}
	}

	dev_warn(adev->dev, "WARN: GPU thermal throttling temperature reached, expect performance decrease. %s.\n",
		 log_buf);
	kgd2kfd_smi_event_throttle(smu->adev->kfd.dev,
		smu_cmn_get_indep_throttler_status(throttler_status,
						   aldebaran_throttler_map));
}
1759 
/*
 * aldebaran_get_current_pcie_link_speed - report the current link speed,
 * accounting for ESM (Extended Speed Mode) on XGMI-connected parts.
 *
 * NOTE(review): bit 15 of PCIE_ESM_CTRL appears to flag ESM being active
 * and bits [14:8] the ESM speed, offset by 128 to distinguish it from
 * standard PCIe gen values — confirm against the register spec.
 */
static int aldebaran_get_current_pcie_link_speed(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t esm_ctrl;

	/* TODO: confirm this on real target */
	esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
	if ((esm_ctrl >> 15) & 0x1)
		return (((esm_ctrl >> 8) & 0x7F) + 128);

	/* Fall back to the standard PCIe link-speed query. */
	return smu_v13_0_get_current_pcie_link_speed(smu);
}
1772 
/*
 * aldebaran_get_gpu_metrics - marshal the firmware metrics table into the
 * versioned gpu_metrics_v1_3 layout exposed to userspace.
 *
 * Forces a fresh metrics read, then copies field by field.  Power/energy
 * fields are zeroed on secondary dies, and PCIe fields are skipped under
 * SR-IOV.
 *
 * Returns the size of the emitted structure, or a negative error from the
 * metrics read.  *@table points at the context-owned buffer on success.
 */
static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
					 void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_3 *gpu_metrics =
		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int i, ret = 0;

	/* "true" bypasses the cache so userspace sees fresh data. */
	ret = smu_cmn_get_metrics_table(smu,
					&metrics,
					true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
	gpu_metrics->temperature_mem = metrics.TemperatureHBM;
	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem;

	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;

	/* Valid power data is available only from primary die */
	if (aldebaran_is_primary(smu)) {
		gpu_metrics->average_socket_power = metrics.AverageSocketPower;
		gpu_metrics->energy_accumulator =
			(uint64_t)metrics.EnergyAcc64bitHigh << 32 |
			metrics.EnergyAcc64bitLow;
	} else {
		gpu_metrics->average_socket_power = 0;
		gpu_metrics->energy_accumulator = 0;
	}

	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
	/* No VCN averages in this metrics revision; report 0. */
	gpu_metrics->average_vclk0_frequency = 0;
	gpu_metrics->average_dclk0_frequency = 0;

	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
							   aldebaran_throttler_map);

	/* Passively cooled part: no fan to report. */
	gpu_metrics->current_fan_speed = 0;

	if (!amdgpu_sriov_vf(smu->adev)) {
		gpu_metrics->pcie_link_width =
			smu_v13_0_get_current_pcie_link_width(smu);
		gpu_metrics->pcie_link_speed =
			aldebaran_get_current_pcie_link_speed(smu);
	}

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	gpu_metrics->gfx_activity_acc = metrics.GfxBusyAcc;
	gpu_metrics->mem_activity_acc = metrics.DramBusyAcc;

	for (i = 0; i < NUM_HBM_INSTANCES; i++)
		gpu_metrics->temperature_hbm[i] = metrics.TemperatureAllHBM[i];

	gpu_metrics->firmware_timestamp = ((uint64_t)metrics.TimeStampHigh << 32) |
					metrics.TimeStampLow;

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_3);
}
1852 
1853 static int aldebaran_check_ecc_table_support(struct smu_context *smu,
1854 		int *ecctable_version)
1855 {
1856 	if (smu->smc_fw_version < SUPPORT_ECCTABLE_SMU_VERSION)
1857 		return -EOPNOTSUPP;
1858 	else if (smu->smc_fw_version >= SUPPORT_ECCTABLE_SMU_VERSION &&
1859 			smu->smc_fw_version < SUPPORT_ECCTABLE_V2_SMU_VERSION)
1860 		*ecctable_version = 1;
1861 	else
1862 		*ecctable_version = 2;
1863 
1864 	return 0;
1865 }
1866 
1867 static ssize_t aldebaran_get_ecc_info(struct smu_context *smu,
1868 					 void *table)
1869 {
1870 	struct smu_table_context *smu_table = &smu->smu_table;
1871 	EccInfoTable_t *ecc_table = NULL;
1872 	struct ecc_info_per_ch *ecc_info_per_channel = NULL;
1873 	int i, ret = 0;
1874 	int table_version = 0;
1875 	struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table;
1876 
1877 	ret = aldebaran_check_ecc_table_support(smu, &table_version);
1878 	if (ret)
1879 		return ret;
1880 
1881 	ret = smu_cmn_update_table(smu,
1882 			       SMU_TABLE_ECCINFO,
1883 			       0,
1884 			       smu_table->ecc_table,
1885 			       false);
1886 	if (ret) {
1887 		dev_info(smu->adev->dev, "Failed to export SMU ecc table!\n");
1888 		return ret;
1889 	}
1890 
1891 	ecc_table = (EccInfoTable_t *)smu_table->ecc_table;
1892 
1893 	if (table_version == 1) {
1894 		for (i = 0; i < ALDEBARAN_UMC_CHANNEL_NUM; i++) {
1895 			ecc_info_per_channel = &(eccinfo->ecc[i]);
1896 			ecc_info_per_channel->ce_count_lo_chip =
1897 				ecc_table->EccInfo[i].ce_count_lo_chip;
1898 			ecc_info_per_channel->ce_count_hi_chip =
1899 				ecc_table->EccInfo[i].ce_count_hi_chip;
1900 			ecc_info_per_channel->mca_umc_status =
1901 				ecc_table->EccInfo[i].mca_umc_status;
1902 			ecc_info_per_channel->mca_umc_addr =
1903 				ecc_table->EccInfo[i].mca_umc_addr;
1904 		}
1905 	} else if (table_version == 2) {
1906 		for (i = 0; i < ALDEBARAN_UMC_CHANNEL_NUM; i++) {
1907 			ecc_info_per_channel = &(eccinfo->ecc[i]);
1908 			ecc_info_per_channel->ce_count_lo_chip =
1909 				ecc_table->EccInfo_V2[i].ce_count_lo_chip;
1910 			ecc_info_per_channel->ce_count_hi_chip =
1911 				ecc_table->EccInfo_V2[i].ce_count_hi_chip;
1912 			ecc_info_per_channel->mca_umc_status =
1913 				ecc_table->EccInfo_V2[i].mca_umc_status;
1914 			ecc_info_per_channel->mca_umc_addr =
1915 				ecc_table->EccInfo_V2[i].mca_umc_addr;
1916 			ecc_info_per_channel->mca_ceumc_addr =
1917 				ecc_table->EccInfo_V2[i].mca_ceumc_addr;
1918 		}
1919 		eccinfo->record_ce_addr_supported = 1;
1920 	}
1921 
1922 	return ret;
1923 }
1924 
1925 static int aldebaran_mode1_reset(struct smu_context *smu)
1926 {
1927 	u32 fatal_err, param;
1928 	int ret = 0;
1929 	struct amdgpu_device *adev = smu->adev;
1930 
1931 	fatal_err = 0;
1932 	param = SMU_RESET_MODE_1;
1933 
1934 	/*
1935 	* PM FW support SMU_MSG_GfxDeviceDriverReset from 68.07
1936 	*/
1937 	if (smu->smc_fw_version < 0x00440700) {
1938 		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
1939 	} else {
1940 		/* fatal error triggered by ras, PMFW supports the flag
1941 		   from 68.44.0 */
1942 		if ((smu->smc_fw_version >= 0x00442c00) &&
1943 		    amdgpu_ras_get_fed_status(adev))
1944 			fatal_err = 1;
1945 
1946 		param |= (fatal_err << 16);
1947 		ret = smu_cmn_send_smc_msg_with_param(smu,
1948 					SMU_MSG_GfxDeviceDriverReset, param, NULL);
1949 	}
1950 
1951 	if (!ret)
1952 		msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
1953 
1954 	return ret;
1955 }
1956 
/*
 * Trigger a mode-2 reset through the PMFW and wait for its acknowledgement.
 *
 * The message is sent without waiting, the device config space is restored
 * after an FLR-like delay, and the response register is then polled for an
 * ACK under smu->message_lock.
 *
 * Returns 0 on success, -EINVAL if the message has no ASIC mapping, or a
 * negative error / firmware response code on failure.
 */
static int aldebaran_mode2_reset(struct smu_context *smu)
{
	int ret = 0, index;
	struct amdgpu_device *adev = smu->adev;
	int timeout = 10;	/* max extra polls for the reset ACK */

	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
						SMU_MSG_GfxDeviceDriverReset);
	if (index < 0 )
		return -EINVAL;
	/* Serialize against any other message traffic to the PMFW. */
	mutex_lock(&smu->message_lock);
	if (smu->smc_fw_version >= 0x00441400) {
		/* Fire the reset request without blocking on a response. */
		ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2);
		/* This is similar to FLR, wait till max FLR timeout */
		msleep(100);
		dev_dbg(smu->adev->dev, "restore config space...\n");
		/* Restore the config space saved during init */
		amdgpu_device_load_pci_state(adev->pdev);

		dev_dbg(smu->adev->dev, "wait for reset ack\n");
		/*
		 * NOTE(review): the loop only runs if the send above left
		 * ret == -ETIME (response still pending) — confirm that is
		 * the intended contract of smu_cmn_send_msg_without_waiting.
		 */
		while (ret == -ETIME && timeout)  {
			ret = smu_cmn_wait_for_response(smu);
			/* Wait a bit more time for getting ACK */
			if (ret == -ETIME) {
				--timeout;
				usleep_range(500, 1000);
				continue;
			}

			/* Any response other than 1 is a firmware-side failure. */
			if (ret != 1) {
				dev_err(adev->dev, "failed to send mode2 message \tparam: 0x%08x response %#x\n",
						SMU_RESET_MODE_2, ret);
				goto out;
			}
		}

	} else {
		dev_err(adev->dev, "smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n",
				smu->smc_fw_version);
	}

	/* A response of 1 from the PMFW means success; normalize to 0. */
	if (ret == 1)
		ret = 0;
out:
	mutex_unlock(&smu->message_lock);

	return ret;
}
2005 
2006 static int aldebaran_smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
2007 {
2008 	int ret = 0;
2009 	ret =  smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_HeavySBR, enable ? 1 : 0, NULL);
2010 
2011 	return ret;
2012 }
2013 
/*
 * Report whether mode-1 reset is supported. Currently hard-wired to true;
 * the #if 0 block below is the retained firmware-version + PSP-liveness
 * check that would otherwise gate support.
 */
static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
{
#if 0
	struct amdgpu_device *adev = smu->adev;
	uint32_t val;
	uint32_t smu_version;
	int ret;

	/**
	 * PM FW version support mode1 reset from 68.07
	 */
	ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
	if (ret)
		return false;

	if ((smu_version < 0x00440700))
		return false;

	/**
	 * mode1 reset relies on PSP, so we should check if
	 * PSP is alive.
	 */
	val = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);

	return val != 0x0;
#endif
	return true;
}
2042 
2043 static int aldebaran_set_mp1_state(struct smu_context *smu,
2044 				   enum pp_mp1_state mp1_state)
2045 {
2046 	switch (mp1_state) {
2047 	case PP_MP1_STATE_UNLOAD:
2048 		return smu_cmn_set_mp1_state(smu, mp1_state);
2049 	default:
2050 		return 0;
2051 	}
2052 }
2053 
2054 static int aldebaran_smu_send_hbm_bad_page_num(struct smu_context *smu,
2055 		uint32_t size)
2056 {
2057 	int ret = 0;
2058 
2059 	/* message SMU to update the bad page number on SMUBUS */
2060 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetNumBadHbmPagesRetired, size, NULL);
2061 	if (ret)
2062 		dev_err(smu->adev->dev, "[%s] failed to message SMU to update HBM bad pages number\n",
2063 				__func__);
2064 
2065 	return ret;
2066 }
2067 
2068 static int aldebaran_check_bad_channel_info_support(struct smu_context *smu)
2069 {
2070 	if (smu->smc_fw_version < SUPPORT_BAD_CHANNEL_INFO_MSG_VERSION)
2071 		return -EOPNOTSUPP;
2072 
2073 	return 0;
2074 }
2075 
2076 static int aldebaran_send_hbm_bad_channel_flag(struct smu_context *smu,
2077 		uint32_t size)
2078 {
2079 	int ret = 0;
2080 
2081 	ret = aldebaran_check_bad_channel_info_support(smu);
2082 	if (ret)
2083 		return ret;
2084 
2085 	/* message SMU to update the bad channel info on SMUBUS */
2086 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetBadHBMPagesRetiredFlagsPerChannel, size, NULL);
2087 	if (ret)
2088 		dev_err(smu->adev->dev, "[%s] failed to message SMU to update HBM bad channel info\n",
2089 				__func__);
2090 
2091 	return ret;
2092 }
2093 
/*
 * Aldebaran pptable ops vtable: mixes ASIC-specific handlers (aldebaran_*)
 * with shared SMU v13.0 and common (smu_cmn_*) implementations.
 */
static const struct pptable_funcs aldebaran_ppt_funcs = {
	/* init dpm */
	.get_allowed_feature_mask = aldebaran_get_allowed_feature_mask,
	/* dpm/clk tables */
	.set_default_dpm_table = aldebaran_set_default_dpm_table,
	.populate_umd_state_clk = aldebaran_populate_umd_state_clk,
	.get_thermal_temperature_range = aldebaran_get_thermal_temperature_range,
	.emit_clk_levels = aldebaran_emit_clk_levels,
	.force_clk_levels = aldebaran_force_clk_levels,
	.read_sensor = aldebaran_read_sensor,
	.set_performance_level = aldebaran_set_performance_level,
	.get_power_limit = aldebaran_get_power_limit,
	.is_dpm_running = aldebaran_is_dpm_running,
	.get_unique_id = aldebaran_get_unique_id,
	.init_microcode = smu_v13_0_init_microcode,
	.load_microcode = smu_v13_0_load_microcode,
	.fini_microcode = smu_v13_0_fini_microcode,
	.init_smc_tables = aldebaran_init_smc_tables,
	.fini_smc_tables = smu_v13_0_fini_smc_tables,
	.init_power = smu_v13_0_init_power,
	.fini_power = smu_v13_0_fini_power,
	.check_fw_status = smu_v13_0_check_fw_status,
	/* pptable related */
	.setup_pptable = aldebaran_setup_pptable,
	.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
	.check_fw_version = smu_v13_0_check_fw_version,
	.write_pptable = smu_cmn_write_pptable,
	.set_driver_table_location = smu_v13_0_set_driver_table_location,
	.set_tool_table_location = smu_v13_0_set_tool_table_location,
	.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
	.system_features_control = aldebaran_system_features_control,
	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
	.send_smc_msg = smu_cmn_send_smc_msg,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
	.set_power_limit = aldebaran_set_power_limit,
	.init_max_sustainable_clocks = smu_v13_0_init_max_sustainable_clocks,
	.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
	.disable_thermal_alert = smu_v13_0_disable_thermal_alert,
	.set_xgmi_pstate = smu_v13_0_set_xgmi_pstate,
	.register_irq_handler = smu_v13_0_register_irq_handler,
	.set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme,
	.get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc,
	.get_bamaco_support = aldebaran_get_bamaco_support,
	.get_dpm_ultimate_freq = aldebaran_get_dpm_ultimate_freq,
	.set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
	.od_edit_dpm_table = aldebaran_usr_edit_dpm_table,
	.set_df_cstate = aldebaran_set_df_cstate,
	.log_thermal_throttling_event = aldebaran_log_thermal_throttling_event,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
	.get_gpu_metrics = aldebaran_get_gpu_metrics,
	.mode1_reset_is_support = aldebaran_is_mode1_reset_supported,
	.smu_handle_passthrough_sbr = aldebaran_smu_handle_passthrough_sbr,
	.mode1_reset = aldebaran_mode1_reset,
	.set_mp1_state = aldebaran_set_mp1_state,
	.mode2_reset = aldebaran_mode2_reset,
	.wait_for_event = smu_v13_0_wait_for_event,
	.i2c_init = aldebaran_i2c_control_init,
	.i2c_fini = aldebaran_i2c_control_fini,
	.send_hbm_bad_pages_num = aldebaran_smu_send_hbm_bad_page_num,
	.get_ecc_info = aldebaran_get_ecc_info,
	.send_hbm_bad_channel_flag = aldebaran_send_hbm_bad_channel_flag,
};
2159 
/*
 * Wire the Aldebaran ops vtable and ASIC-specific mapping tables into the
 * SMU context and initialize the SMU v13.0 mailbox registers.
 */
void aldebaran_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &aldebaran_ppt_funcs;
	smu->message_map = aldebaran_message_map;
	smu->clock_map = aldebaran_clk_map;
	smu->feature_map = aldebaran_feature_mask_map;
	smu->table_map = aldebaran_table_map;
	smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
	/* Must come after the maps so mailbox setup sees a fully wired context. */
	smu_v13_0_set_smu_mailbox_registers(smu);
}
2170