xref: /linux/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c (revision 323bbfcf1ef8836d0d2ad9e2c1f1c684f0e3b5b3)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "pp_debug.h"
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/slab.h>
27 #include "atom-types.h"
28 #include "atombios.h"
29 #include "processpptables.h"
30 #include "cgs_common.h"
31 #include "smu/smu_8_0_d.h"
32 #include "smu8_fusion.h"
33 #include "smu/smu_8_0_sh_mask.h"
34 #include "smumgr.h"
35 #include "hwmgr.h"
36 #include "hardwaremanager.h"
37 #include "cz_ppsmc.h"
38 #include "smu8_hwmgr.h"
39 #include "power_state.h"
40 #include "pp_thermal.h"
41 
/*
 * SMU SVI telemetry registers (indirect SMC address space): the current
 * 8-bit VID codes for the NB and GFX rails are reported in bits 31:24.
 */
#define ixSMUSVI_NB_CURRENTVID 0xD8230044
#define CURRENT_NB_VID_MASK 0xff000000
#define CURRENT_NB_VID__SHIFT 24
#define ixSMUSVI_GFX_CURRENTVID  0xD8230048
#define CURRENT_GFX_VID_MASK 0xff000000
#define CURRENT_GFX_VID__SHIFT 24
48 
/* Tag stored in pp_hw_power_state::magic to mark states owned by this hwmgr. */
static const unsigned long smu8_magic = (unsigned long) PHM_Cz_Magic;
50 
cast_smu8_power_state(struct pp_hw_power_state * hw_ps)51 static struct smu8_power_state *cast_smu8_power_state(struct pp_hw_power_state *hw_ps)
52 {
53 	if (smu8_magic != hw_ps->magic)
54 		return NULL;
55 
56 	return (struct smu8_power_state *)hw_ps;
57 }
58 
cast_const_smu8_power_state(const struct pp_hw_power_state * hw_ps)59 static const struct smu8_power_state *cast_const_smu8_power_state(
60 				const struct pp_hw_power_state *hw_ps)
61 {
62 	if (smu8_magic != hw_ps->magic)
63 		return NULL;
64 
65 	return (struct smu8_power_state *)hw_ps;
66 }
67 
/*
 * Map an ECLK frequency to the VCE dependency-table level index the given
 * SMU message expects.  Min-type messages select the lowest level whose
 * eclk is >= @clock; max-type messages select the highest level whose eclk
 * is <= @clock.  Like the other level lookups, an out-of-range @clock can
 * yield count (min case) or (uint32_t)-1 (max case) — callers rely on the
 * original semantics, so they are preserved.
 */
static uint32_t smu8_get_eclk_level(struct pp_hwmgr *hwmgr,
					uint32_t clock, uint32_t msg)
{
	struct phm_vce_clock_voltage_dependency_table *deps =
		hwmgr->dyn_state.vce_clock_voltage_dependency_table;
	int level = 0;

	switch (msg) {
	case PPSMC_MSG_SetEclkSoftMin:
	case PPSMC_MSG_SetEclkHardMin:
		/* first entry at or above the requested clock */
		while (level < (int)deps->count &&
		       clock > deps->entries[level].ecclk)
			level++;
		break;

	case PPSMC_MSG_SetEclkSoftMax:
	case PPSMC_MSG_SetEclkHardMax:
		/* last entry at or below the requested clock */
		level = deps->count - 1;
		while (level >= 0 && clock < deps->entries[level].ecclk)
			level--;
		break;

	default:
		break;
	}

	return level;
}
98 
/*
 * Map an SCLK frequency to a vddc_dependency_on_sclk level index for the
 * given SMU message.  Min-type messages pick the lowest level with
 * clk >= @clock, max-type messages the highest level with clk <= @clock;
 * out-of-range clocks return count / (uint32_t)-1 exactly as before.
 */
static uint32_t smu8_get_sclk_level(struct pp_hwmgr *hwmgr,
				uint32_t clock, uint32_t msg)
{
	struct phm_clock_voltage_dependency_table *deps =
				hwmgr->dyn_state.vddc_dependency_on_sclk;
	int level = 0;

	switch (msg) {
	case PPSMC_MSG_SetSclkSoftMin:
	case PPSMC_MSG_SetSclkHardMin:
		/* first entry at or above the requested clock */
		while (level < (int)deps->count &&
		       clock > deps->entries[level].clk)
			level++;
		break;

	case PPSMC_MSG_SetSclkSoftMax:
	case PPSMC_MSG_SetSclkHardMax:
		/* last entry at or below the requested clock */
		level = deps->count - 1;
		while (level >= 0 && clock < deps->entries[level].clk)
			level--;
		break;

	default:
		break;
	}

	return level;
}
128 
/*
 * Map a UVD VCLK frequency to its dependency-table level index for the
 * given SMU message.  Min-type messages pick the lowest level with
 * vclk >= @clock, max-type messages the highest level with vclk <= @clock;
 * out-of-range clocks return count / (uint32_t)-1 exactly as before.
 */
static uint32_t smu8_get_uvd_level(struct pp_hwmgr *hwmgr,
					uint32_t clock, uint32_t msg)
{
	struct phm_uvd_clock_voltage_dependency_table *deps =
		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
	int level = 0;

	switch (msg) {
	case PPSMC_MSG_SetUvdSoftMin:
	case PPSMC_MSG_SetUvdHardMin:
		/* first entry at or above the requested clock */
		while (level < (int)deps->count &&
		       clock > deps->entries[level].vclk)
			level++;
		break;

	case PPSMC_MSG_SetUvdSoftMax:
	case PPSMC_MSG_SetUvdHardMax:
		/* last entry at or below the requested clock */
		level = deps->count - 1;
		while (level >= 0 && clock < deps->entries[level].vclk)
			level--;
		break;

	default:
		break;
	}

	return level;
}
159 
smu8_get_max_sclk_level(struct pp_hwmgr * hwmgr)160 static uint32_t smu8_get_max_sclk_level(struct pp_hwmgr *hwmgr)
161 {
162 	struct smu8_hwmgr *data = hwmgr->backend;
163 
164 	if (data->max_sclk_level == 0) {
165 		smum_send_msg_to_smc(hwmgr,
166 				PPSMC_MSG_GetMaxSclkLevel,
167 				&data->max_sclk_level);
168 		data->max_sclk_level += 1;
169 	}
170 
171 	return data->max_sclk_level;
172 }
173 
/*
 * smu8_initialize_dpm_defaults - program default DPM tuning values and
 * platform capability flags into the backend.
 *
 * Pure driver-side bookkeeping: only the smu8_hwmgr backend fields and the
 * platform caps bitmap are written; nothing is sent to the SMU here.
 * UVD/VCE power-gating caps are enabled only when the matching
 * AMD_PG_SUPPORT_* bit is set in adev->pg_flags.  Always returns 0.
 */
static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct amdgpu_device *adev = hwmgr->adev;

	data->gfx_ramp_step = 256*25/100;	/* 25% of a 256-step ramp */
	data->gfx_ramp_delay = 1; /* by default, we delay 1us */

	data->mgcg_cgtt_local0 = 0x00000000;
	data->mgcg_cgtt_local1 = 0x00000000;
	data->clock_slow_down_freq = 25000;
	data->skip_clock_slow_down = 1;
	data->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */
	data->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
	data->voting_rights_clients = 0x00C00033;
	data->static_screen_threshold = 8;
	data->ddi_power_gating_disabled = 0;
	data->bapm_enabled = 1;
	data->voltage_drop_threshold = 0;
	data->gfx_power_gating_threshold = 500;
	data->vce_slow_sclk_threshold = 20000;
	data->dce_slow_sclk_threshold = 30000;
	data->disable_driver_thermal_policy = 1;
	data->disable_nb_ps3_in_battery = 0;

	/* ABM is not handled by powerplay on this ASIC */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
							PHM_PlatformCaps_ABM);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				    PHM_PlatformCaps_NonABMSupportInPPLib);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DynamicM3Arbiter);

	data->override_dynamic_mgpg = 1;

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				  PHM_PlatformCaps_DynamicPatchPowerState);

	data->thermal_auto_throttling_treshold = 0;
	data->tdr_clock = 0;
	data->disable_gfx_power_gating_in_uvd = 0;

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DynamicUVDState);

	/* UVD and VCE DPM are always available on SMU8 parts */
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UVDDPM);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_VCEDPM);

	/* CC6 (CPU deep sleep) interaction defaults: nothing disabled */
	data->cc6_settings.cpu_cc6_disable = false;
	data->cc6_settings.cpu_pstate_disable = false;
	data->cc6_settings.nb_pstate_switch_disable = false;
	data->cc6_settings.cpu_pstate_separation_time = 0;

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				   PHM_PlatformCaps_DisableVoltageIsland);

	/* power gating caps default off, re-enabled below per pg_flags */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_UVDPowerGating);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_VCEPowerGating);

	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			      PHM_PlatformCaps_UVDPowerGating);
	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			      PHM_PlatformCaps_VCEPowerGating);


	return 0;
}
248 
249 /* convert form 8bit vid to real voltage in mV*4 */
smu8_convert_8Bit_index_to_voltage(struct pp_hwmgr * hwmgr,uint16_t voltage)250 static uint32_t smu8_convert_8Bit_index_to_voltage(
251 			struct pp_hwmgr *hwmgr, uint16_t voltage)
252 {
253 	return 6200 - (voltage * 25);
254 }
255 
smu8_construct_max_power_limits_table(struct pp_hwmgr * hwmgr,struct phm_clock_and_voltage_limits * table)256 static int smu8_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
257 			struct phm_clock_and_voltage_limits *table)
258 {
259 	struct smu8_hwmgr *data = hwmgr->backend;
260 	struct smu8_sys_info *sys_info = &data->sys_info;
261 	struct phm_clock_voltage_dependency_table *dep_table =
262 				hwmgr->dyn_state.vddc_dependency_on_sclk;
263 
264 	if (dep_table->count > 0) {
265 		table->sclk = dep_table->entries[dep_table->count-1].clk;
266 		table->vddc = smu8_convert_8Bit_index_to_voltage(hwmgr,
267 		   (uint16_t)dep_table->entries[dep_table->count-1].v);
268 	}
269 	table->mclk = sys_info->nbp_memory_clock[0];
270 	return 0;
271 }
272 
/*
 * smu8_init_dynamic_state_adjustment_rule_settings - build the 8-entry
 * DAL power-level -> voltage-index table and hang it off
 * hwmgr->dyn_state.vddc_dep_on_dal_pwrl.
 *
 * @disp_voltage_table is currently unused; kept for interface stability.
 * The table is heap-allocated and owned by dyn_state (freed by the hwmgr
 * teardown path, not here).  Returns 0 on success or -ENOMEM.
 *
 * Fix: the original called a nonexistent kzalloc_flex(); a struct with a
 * trailing flexible entries[] array is allocated with
 * kzalloc(struct_size(...), GFP_KERNEL).
 */
static int smu8_init_dynamic_state_adjustment_rule_settings(
			struct pp_hwmgr *hwmgr,
			ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table)
{
	struct phm_clock_voltage_dependency_table *table_clk_vlt;

	table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 8),
				GFP_KERNEL);
	if (!table_clk_vlt) {
		pr_err("Can not allocate memory!\n");
		return -ENOMEM;
	}

	/* one entry per DAL power level, voltage index == level number */
	table_clk_vlt->count = 8;
	table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
	table_clk_vlt->entries[0].v = 0;
	table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
	table_clk_vlt->entries[1].v = 1;
	table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
	table_clk_vlt->entries[2].v = 2;
	table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
	table_clk_vlt->entries[3].v = 3;
	table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
	table_clk_vlt->entries[4].v = 4;
	table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
	table_clk_vlt->entries[5].v = 5;
	table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
	table_clk_vlt->entries[6].v = 6;
	table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
	table_clk_vlt->entries[7].v = 7;
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;

	return 0;
}
307 
/*
 * smu8_get_system_info_data - populate the backend's sys_info from the
 * VBIOS ATOM IntegratedSystemInfo table (content revision 9 only).
 *
 * Converts the little-endian table fields to host order, applies fallback
 * defaults for the HTC thermal limits, flattens the NB p-state arrays when
 * NB DPM is disabled, and then derives the max power limits and the DAL
 * voltage table.  Returns 0 on success, -EINVAL when the table is missing
 * or has an unsupported revision.
 */
static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL;
	uint32_t i;
	int result = 0;
	uint8_t frev, crev;
	uint16_t size;

	/* NOTE(review): the returned pointer appears to reference the BIOS
	 * image (nothing is freed on the error paths below) — confirm
	 * smu_atom_get_data_table() ownership semantics.
	 */
	info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *)smu_atom_get_data_table(hwmgr->adev,
			GetIndexIntoMasterTable(DATA, IntegratedSystemInfo),
			&size, &frev, &crev);

	if (info == NULL) {
		pr_err("Could not retrieve the Integrated System Info Table!\n");
		return -EINVAL;
	}

	/* only content revision 9 of the IGP table is understood here */
	if (crev != 9) {
		pr_err("Unsupported IGP table: %d %d\n", frev, crev);
		return -EINVAL;
	}

	data->sys_info.bootup_uma_clock =
				   le32_to_cpu(info->ulBootUpUMAClock);

	data->sys_info.bootup_engine_clock =
				le32_to_cpu(info->ulBootUpEngineClock);

	data->sys_info.dentist_vco_freq =
				   le32_to_cpu(info->ulDentistVCOFreq);

	data->sys_info.system_config =
				     le32_to_cpu(info->ulSystemConfig);

	data->sys_info.bootup_nb_voltage_index =
				  le16_to_cpu(info->usBootUpNBVoltage);

	/* zero in the table means "use default" for both HTC limits */
	data->sys_info.htc_hyst_lmt =
			(info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt;

	data->sys_info.htc_tmp_lmt =
			(info->ucHtcTmpLmt == 0) ? 203 : info->ucHtcTmpLmt;

	if (data->sys_info.htc_tmp_lmt <=
			data->sys_info.htc_hyst_lmt) {
		pr_err("The htcTmpLmt should be larger than htcHystLmt.\n");
		return -EINVAL;
	}

	/* NB DPM requires both the driver policy and system-config bit 3 */
	data->sys_info.nb_dpm_enable =
				data->enable_nb_ps_policy &&
				(le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1);

	for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
		/* the table carries fewer memory clocks than NB p-states */
		if (i < SMU8_NUM_NBPMEMORYCLOCK) {
			data->sys_info.nbp_memory_clock[i] =
			  le32_to_cpu(info->ulNbpStateMemclkFreq[i]);
		}
		data->sys_info.nbp_n_clock[i] =
			    le32_to_cpu(info->ulNbpStateNClkFreq[i]);
	}

	for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) {
		data->sys_info.display_clock[i] =
					le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
	}

	/* Here use 4 levels, make sure not exceed */
	for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
		data->sys_info.nbp_voltage_index[i] =
			     le16_to_cpu(info->usNBPStateVoltage[i]);
	}

	/* with NB DPM off, pin every p-state to the p-state-0 values */
	if (!data->sys_info.nb_dpm_enable) {
		for (i = 1; i < SMU8_NUM_NBPSTATES; i++) {
			if (i < SMU8_NUM_NBPMEMORYCLOCK) {
				data->sys_info.nbp_memory_clock[i] =
				    data->sys_info.nbp_memory_clock[0];
			}
			data->sys_info.nbp_n_clock[i] =
				    data->sys_info.nbp_n_clock[0];
			data->sys_info.nbp_voltage_index[i] =
				    data->sys_info.nbp_voltage_index[0];
		}
	}

	if (le32_to_cpu(info->ulGPUCapInfo) &
		SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				    PHM_PlatformCaps_EnableDFSBypass);
	}

	data->sys_info.uma_channel_number = info->ucUMAChannelNumber;

	smu8_construct_max_power_limits_table (hwmgr,
				    &hwmgr->dyn_state.max_clock_voltage_on_ac);

	smu8_init_dynamic_state_adjustment_rule_settings(hwmgr,
				    &info->sDISPCLK_Voltage[0]);

	return result;
}
411 
smu8_construct_boot_state(struct pp_hwmgr * hwmgr)412 static int smu8_construct_boot_state(struct pp_hwmgr *hwmgr)
413 {
414 	struct smu8_hwmgr *data = hwmgr->backend;
415 
416 	data->boot_power_level.engineClock =
417 				data->sys_info.bootup_engine_clock;
418 
419 	data->boot_power_level.vddcIndex =
420 			(uint8_t)data->sys_info.bootup_nb_voltage_index;
421 
422 	data->boot_power_level.dsDividerIndex = 0;
423 	data->boot_power_level.ssDividerIndex = 0;
424 	data->boot_power_level.allowGnbSlow = 1;
425 	data->boot_power_level.forceNBPstate = 0;
426 	data->boot_power_level.hysteresis_up = 0;
427 	data->boot_power_level.numSIMDToPowerDown = 0;
428 	data->boot_power_level.display_wm = 0;
429 	data->boot_power_level.vce_wm = 0;
430 
431 	return 0;
432 }
433 
/*
 * smu8_upload_pptable_to_smu - download the SMU's clock table, patch each
 * hardware power level with voltages/frequencies from the pptable
 * dependency tables (SCLK, ACP, UVD V/DCLK, VCE ECLK) plus the matching
 * PLL post-dividers, then upload the patched table back to the SMU.
 *
 * No-op (returns 0) unless hwmgr->need_pp_table_upload is set.  Returns
 * -EINVAL when the SMU table cannot be fetched or any dependency table
 * exceeds SMU8_MAX_HARDWARE_POWERLEVELS; otherwise the upload result.
 */
static int smu8_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
{
	struct SMU8_Fusion_ClkTable *clock_table;
	int ret;
	uint32_t i;
	void *table = NULL;
	pp_atomctrl_clock_dividers_kong dividers;

	struct phm_clock_voltage_dependency_table *vddc_table =
		hwmgr->dyn_state.vddc_dependency_on_sclk;
	struct phm_clock_voltage_dependency_table *vdd_gfx_table =
		hwmgr->dyn_state.vdd_gfx_dependency_on_sclk;
	struct phm_acp_clock_voltage_dependency_table *acp_table =
		hwmgr->dyn_state.acp_clock_voltage_dependency_table;
	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
	struct phm_vce_clock_voltage_dependency_table *vce_table =
		hwmgr->dyn_state.vce_clock_voltage_dependency_table;

	if (!hwmgr->need_pp_table_upload)
		return 0;

	/* fetch the SMU's current clock table into 'table' */
	ret = smum_download_powerplay_table(hwmgr, &table);

	PP_ASSERT_WITH_CODE((0 == ret && NULL != table),
			    "Fail to get clock table from SMU!", return -EINVAL;);

	clock_table = (struct SMU8_Fusion_ClkTable *)table;

	/* patch clock table */
	PP_ASSERT_WITH_CODE((vddc_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
			    "Dependency table entry exceeds max limit!", return -EINVAL;);
	PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
			    "Dependency table entry exceeds max limit!", return -EINVAL;);
	PP_ASSERT_WITH_CODE((acp_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
			    "Dependency table entry exceeds max limit!", return -EINVAL;);
	PP_ASSERT_WITH_CODE((uvd_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
			    "Dependency table entry exceeds max limit!", return -EINVAL;);
	PP_ASSERT_WITH_CODE((vce_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
			    "Dependency table entry exceeds max limit!", return -EINVAL;);

	/* levels beyond a table's count are zeroed (vid 0 / 0 MHz) */
	for (i = 0; i < SMU8_MAX_HARDWARE_POWERLEVELS; i++) {

		/* vddc_sclk */
		clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
			(i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
		clock_table->SclkBreakdownTable.ClkLevel[i].Frequency =
			(i < vddc_table->count) ? vddc_table->entries[i].clk : 0;

		atomctrl_get_engine_pll_dividers_kong(hwmgr,
						      clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
						      &dividers);

		clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.pll_post_divider;

		/* vddgfx_sclk */
		clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < vdd_gfx_table->count) ? (uint8_t)vdd_gfx_table->entries[i].v : 0;

		/* acp breakdown */
		clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
		clock_table->AclkBreakdownTable.ClkLevel[i].Frequency =
			(i < acp_table->count) ? acp_table->entries[i].acpclk : 0;

		atomctrl_get_engine_pll_dividers_kong(hwmgr,
						      clock_table->AclkBreakdownTable.ClkLevel[i].Frequency,
						      &dividers);

		clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.pll_post_divider;


		/* uvd breakdown */
		clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
		clock_table->VclkBreakdownTable.ClkLevel[i].Frequency =
			(i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;

		atomctrl_get_engine_pll_dividers_kong(hwmgr,
						      clock_table->VclkBreakdownTable.ClkLevel[i].Frequency,
						      &dividers);

		clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.pll_post_divider;

		clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
		clock_table->DclkBreakdownTable.ClkLevel[i].Frequency =
			(i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;

		atomctrl_get_engine_pll_dividers_kong(hwmgr,
						      clock_table->DclkBreakdownTable.ClkLevel[i].Frequency,
						      &dividers);

		clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.pll_post_divider;

		/* vce breakdown */
		clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
		clock_table->EclkBreakdownTable.ClkLevel[i].Frequency =
			(i < vce_table->count) ? vce_table->entries[i].ecclk : 0;


		atomctrl_get_engine_pll_dividers_kong(hwmgr,
						      clock_table->EclkBreakdownTable.ClkLevel[i].Frequency,
						      &dividers);

		clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.pll_post_divider;

	}
	/* push the patched table back to the SMU */
	ret = smum_upload_powerplay_table(hwmgr);

	return ret;
}
552 
smu8_init_sclk_limit(struct pp_hwmgr * hwmgr)553 static int smu8_init_sclk_limit(struct pp_hwmgr *hwmgr)
554 {
555 	struct smu8_hwmgr *data = hwmgr->backend;
556 	struct phm_clock_voltage_dependency_table *table =
557 					hwmgr->dyn_state.vddc_dependency_on_sclk;
558 	unsigned long clock = 0, level;
559 
560 	if (NULL == table || table->count <= 0)
561 		return -EINVAL;
562 
563 	data->sclk_dpm.soft_min_clk = table->entries[0].clk;
564 	data->sclk_dpm.hard_min_clk = table->entries[0].clk;
565 
566 	level = smu8_get_max_sclk_level(hwmgr) - 1;
567 
568 	if (level < table->count)
569 		clock = table->entries[level].clk;
570 	else
571 		clock = table->entries[table->count - 1].clk;
572 
573 	data->sclk_dpm.soft_max_clk = clock;
574 	data->sclk_dpm.hard_max_clk = clock;
575 
576 	return 0;
577 }
578 
smu8_init_uvd_limit(struct pp_hwmgr * hwmgr)579 static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
580 {
581 	struct smu8_hwmgr *data = hwmgr->backend;
582 	struct phm_uvd_clock_voltage_dependency_table *table =
583 				hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
584 	unsigned long clock = 0;
585 	uint32_t level;
586 	int ret;
587 
588 	if (NULL == table || table->count <= 0)
589 		return -EINVAL;
590 
591 	data->uvd_dpm.soft_min_clk = 0;
592 	data->uvd_dpm.hard_min_clk = 0;
593 
594 	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
595 	if (ret)
596 		return ret;
597 
598 	if (level < table->count)
599 		clock = table->entries[level].vclk;
600 	else
601 		clock = table->entries[table->count - 1].vclk;
602 
603 	data->uvd_dpm.soft_max_clk = clock;
604 	data->uvd_dpm.hard_max_clk = clock;
605 
606 	return 0;
607 }
608 
smu8_init_vce_limit(struct pp_hwmgr * hwmgr)609 static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
610 {
611 	struct smu8_hwmgr *data = hwmgr->backend;
612 	struct phm_vce_clock_voltage_dependency_table *table =
613 				hwmgr->dyn_state.vce_clock_voltage_dependency_table;
614 	unsigned long clock = 0;
615 	uint32_t level;
616 	int ret;
617 
618 	if (NULL == table || table->count <= 0)
619 		return -EINVAL;
620 
621 	data->vce_dpm.soft_min_clk = 0;
622 	data->vce_dpm.hard_min_clk = 0;
623 
624 	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
625 	if (ret)
626 		return ret;
627 
628 	if (level < table->count)
629 		clock = table->entries[level].ecclk;
630 	else
631 		clock = table->entries[table->count - 1].ecclk;
632 
633 	data->vce_dpm.soft_max_clk = clock;
634 	data->vce_dpm.hard_max_clk = clock;
635 
636 	return 0;
637 }
638 
smu8_init_acp_limit(struct pp_hwmgr * hwmgr)639 static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
640 {
641 	struct smu8_hwmgr *data = hwmgr->backend;
642 	struct phm_acp_clock_voltage_dependency_table *table =
643 				hwmgr->dyn_state.acp_clock_voltage_dependency_table;
644 	unsigned long clock = 0;
645 	uint32_t level;
646 	int ret;
647 
648 	if (NULL == table || table->count <= 0)
649 		return -EINVAL;
650 
651 	data->acp_dpm.soft_min_clk = 0;
652 	data->acp_dpm.hard_min_clk = 0;
653 
654 	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
655 	if (ret)
656 		return ret;
657 
658 	if (level < table->count)
659 		clock = table->entries[level].acpclk;
660 	else
661 		clock = table->entries[table->count - 1].acpclk;
662 
663 	data->acp_dpm.soft_max_clk = clock;
664 	data->acp_dpm.hard_max_clk = clock;
665 	return 0;
666 }
667 
/*
 * smu8_init_power_gate_state - record the initial (ungated) power-gate
 * state of the UVD/VCE/SAMU/ACP blocks.
 *
 * When the kernel has no ACP driver (CONFIG_DRM_AMD_ACP unset) the ACP
 * block is powered off via the SMU and tracked as gated.
 */
static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	data->uvd_power_gated = false;
	data->vce_power_gated = false;
	data->samu_power_gated = false;
#ifdef CONFIG_DRM_AMD_ACP
	data->acp_power_gated = false;
#else
	/* no ACP driver built in: power the block off and mark it gated */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL);
	data->acp_power_gated = true;
#endif

}
683 
smu8_init_sclk_threshold(struct pp_hwmgr * hwmgr)684 static void smu8_init_sclk_threshold(struct pp_hwmgr *hwmgr)
685 {
686 	struct smu8_hwmgr *data = hwmgr->backend;
687 
688 	data->low_sclk_interrupt_threshold = 0;
689 }
690 
/*
 * smu8_update_sclk_limit - refresh the SCLK soft/hard limits from the
 * current display configuration and push any changes to the SMU.
 *
 * Hard minimum tracks the display's min_core_set_clock; soft minimum is
 * raised to 75% of the max AC mclk when the StablePState cap is enabled,
 * in which case the soft maximum is pinned to the same value so the clock
 * is effectively fixed.  SMC messages are only sent for values that
 * actually changed.  Always returns 0.
 */
static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_clock_voltage_dependency_table *table =
					hwmgr->dyn_state.vddc_dependency_on_sclk;

	unsigned long clock = 0;
	unsigned long level;
	unsigned long stable_pstate_sclk;
	unsigned long percentage;

	data->sclk_dpm.soft_min_clk = table->entries[0].clk;
	level = smu8_get_max_sclk_level(hwmgr) - 1;

	/* soft max: SMU's top level, clamped to the dependency table */
	if (level < table->count)
		data->sclk_dpm.soft_max_clk  = table->entries[level].clk;
	else
		data->sclk_dpm.soft_max_clk  = table->entries[table->count - 1].clk;

	clock = hwmgr->display_config->min_core_set_clock;
	if (clock == 0)
		pr_debug("min_core_set_clock not set\n");

	if (data->sclk_dpm.hard_min_clk != clock) {
		data->sclk_dpm.hard_min_clk = clock;

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSclkHardMin,
						 smu8_get_sclk_level(hwmgr,
					data->sclk_dpm.hard_min_clk,
					     PPSMC_MSG_SetSclkHardMin),
						 NULL);
	}

	clock = data->sclk_dpm.soft_min_clk;

	/* update minimum clocks for Stable P-State feature */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				     PHM_PlatformCaps_StablePState)) {
		percentage = 75;
		/*Sclk - calculate sclk value based on percentage and find FLOOR sclk from VddcDependencyOnSCLK table  */
		stable_pstate_sclk = (hwmgr->dyn_state.max_clock_voltage_on_ac.mclk *
					percentage) / 100;

		if (clock < stable_pstate_sclk)
			clock = stable_pstate_sclk;
	}

	if (data->sclk_dpm.soft_min_clk != clock) {
		data->sclk_dpm.soft_min_clk = clock;
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSclkSoftMin,
						smu8_get_sclk_level(hwmgr,
					data->sclk_dpm.soft_min_clk,
					     PPSMC_MSG_SetSclkSoftMin),
						NULL);
	}

	/* stable p-state: pin soft max to the (raised) soft min so the
	 * engine clock stays fixed
	 */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				    PHM_PlatformCaps_StablePState) &&
			 data->sclk_dpm.soft_max_clk != clock) {
		data->sclk_dpm.soft_max_clk = clock;
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSclkSoftMax,
						smu8_get_sclk_level(hwmgr,
					data->sclk_dpm.soft_max_clk,
					PPSMC_MSG_SetSclkSoftMax),
						NULL);
	}

	return 0;
}
763 
smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr * hwmgr)764 static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
765 {
766 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
767 				PHM_PlatformCaps_SclkDeepSleep)) {
768 		uint32_t clks = hwmgr->display_config->min_core_set_clock_in_sr;
769 		if (clks == 0)
770 			clks = SMU8_MIN_DEEP_SLEEP_SCLK;
771 
772 		PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks);
773 
774 		smum_send_msg_to_smc_with_parameter(hwmgr,
775 				PPSMC_MSG_SetMinDeepSleepSclk,
776 				clks,
777 				NULL);
778 	}
779 
780 	return 0;
781 }
782 
smu8_set_watermark_threshold(struct pp_hwmgr * hwmgr)783 static int smu8_set_watermark_threshold(struct pp_hwmgr *hwmgr)
784 {
785 	struct smu8_hwmgr *data =
786 				  hwmgr->backend;
787 
788 	smum_send_msg_to_smc_with_parameter(hwmgr,
789 					PPSMC_MSG_SetWatermarkFrequency,
790 					data->sclk_dpm.soft_max_clk,
791 					NULL);
792 
793 	return 0;
794 }
795 
/*
 * Enable or disable the low-memory NB p-state via the SMU.  @lock is passed
 * through as the message parameter.  No-op (0) when NB DPM is not enabled;
 * otherwise returns the SMC result.
 */
static int smu8_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
{
	struct smu8_hwmgr *hw_data = hwmgr->backend;

	if (!hw_data->is_nb_dpm_enabled)
		return 0;

	if (enable) {
		PP_DBG_LOG("enable Low Memory PState.\n");

		return smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_EnableLowMemoryPstate,
					(lock ? 1 : 0),
					NULL);
	}

	PP_DBG_LOG("disable Low Memory PState.\n");

	return smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_DisableLowMemoryPstate,
				(lock ? 1 : 0),
				NULL);
}
820 
smu8_disable_nb_dpm(struct pp_hwmgr * hwmgr)821 static int smu8_disable_nb_dpm(struct pp_hwmgr *hwmgr)
822 {
823 	int ret = 0;
824 
825 	struct smu8_hwmgr *data = hwmgr->backend;
826 	unsigned long dpm_features = 0;
827 
828 	if (data->is_nb_dpm_enabled) {
829 		smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
830 		dpm_features |= NB_DPM_MASK;
831 		ret = smum_send_msg_to_smc_with_parameter(
832 							  hwmgr,
833 							  PPSMC_MSG_DisableAllSmuFeatures,
834 							  dpm_features,
835 							  NULL);
836 		if (ret == 0)
837 			data->is_nb_dpm_enabled = false;
838 	}
839 
840 	return ret;
841 }
842 
smu8_enable_nb_dpm(struct pp_hwmgr * hwmgr)843 static int smu8_enable_nb_dpm(struct pp_hwmgr *hwmgr)
844 {
845 	int ret = 0;
846 
847 	struct smu8_hwmgr *data = hwmgr->backend;
848 	unsigned long dpm_features = 0;
849 
850 	if (!data->is_nb_dpm_enabled) {
851 		PP_DBG_LOG("enabling ALL SMU features.\n");
852 		dpm_features |= NB_DPM_MASK;
853 		ret = smum_send_msg_to_smc_with_parameter(
854 							  hwmgr,
855 							  PPSMC_MSG_EnableAllSmuFeatures,
856 							  dpm_features,
857 							  NULL);
858 		if (ret == 0)
859 			data->is_nb_dpm_enabled = true;
860 	}
861 
862 	return ret;
863 }
864 
smu8_update_low_mem_pstate(struct pp_hwmgr * hwmgr,const void * input)865 static int smu8_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input)
866 {
867 	bool disable_switch;
868 	bool enable_low_mem_state;
869 	struct smu8_hwmgr *hw_data = hwmgr->backend;
870 	const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input;
871 	const struct smu8_power_state *pnew_state = cast_const_smu8_power_state(states->pnew_state);
872 
873 	if (hw_data->sys_info.nb_dpm_enable) {
874 		disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false;
875 		enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? false : true;
876 
877 		if (pnew_state->action == FORCE_HIGH)
878 			smu8_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
879 		else if (pnew_state->action == CANCEL_FORCE_HIGH)
880 			smu8_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch);
881 		else
882 			smu8_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch);
883 	}
884 	return 0;
885 }
886 
/*
 * Commit a power state: refresh SCLK limits, deep-sleep floor and
 * watermark, enable NB DPM (the only step whose failure aborts), then
 * adjust the low-memory p-state for the new state.
 */
static int smu8_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	int ret;

	smu8_update_sclk_limit(hwmgr);
	smu8_set_deep_sleep_sclk_threshold(hwmgr);
	smu8_set_watermark_threshold(hwmgr);

	ret = smu8_enable_nb_dpm(hwmgr);
	if (ret)
		return ret;

	smu8_update_low_mem_pstate(hwmgr, input);
	return 0;
}
901 
902 
smu8_setup_asic_task(struct pp_hwmgr * hwmgr)903 static int smu8_setup_asic_task(struct pp_hwmgr *hwmgr)
904 {
905 	int ret;
906 
907 	ret = smu8_upload_pptable_to_smu(hwmgr);
908 	if (ret)
909 		return ret;
910 	ret = smu8_init_sclk_limit(hwmgr);
911 	if (ret)
912 		return ret;
913 	ret = smu8_init_uvd_limit(hwmgr);
914 	if (ret)
915 		return ret;
916 	ret = smu8_init_vce_limit(hwmgr);
917 	if (ret)
918 		return ret;
919 	ret = smu8_init_acp_limit(hwmgr);
920 	if (ret)
921 		return ret;
922 
923 	smu8_init_power_gate_state(hwmgr);
924 	smu8_init_sclk_threshold(hwmgr);
925 
926 	return 0;
927 }
928 
smu8_power_up_display_clock_sys_pll(struct pp_hwmgr * hwmgr)929 static void smu8_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr)
930 {
931 	struct smu8_hwmgr *hw_data = hwmgr->backend;
932 
933 	hw_data->disp_clk_bypass_pending = false;
934 	hw_data->disp_clk_bypass = false;
935 }
936 
smu8_clear_nb_dpm_flag(struct pp_hwmgr * hwmgr)937 static void smu8_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr)
938 {
939 	struct smu8_hwmgr *hw_data = hwmgr->backend;
940 
941 	hw_data->is_nb_dpm_enabled = false;
942 }
943 
smu8_reset_cc6_data(struct pp_hwmgr * hwmgr)944 static void smu8_reset_cc6_data(struct pp_hwmgr *hwmgr)
945 {
946 	struct smu8_hwmgr *hw_data = hwmgr->backend;
947 
948 	hw_data->cc6_settings.cc6_setting_changed = false;
949 	hw_data->cc6_settings.cpu_pstate_separation_time = 0;
950 	hw_data->cc6_settings.cpu_cc6_disable = false;
951 	hw_data->cc6_settings.cpu_pstate_disable = false;
952 }
953 
/*
 * Grant the default client set the right to vote on frequency
 * transitions by writing SMU8_VOTINGRIGHTSCLIENTS_DFLT0 into the
 * CG_FREQ_TRAN_VOTING_0 SMC register.
 */
static void smu8_program_voting_clients(struct pp_hwmgr *hwmgr)
{
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixCG_FREQ_TRAN_VOTING_0,
				SMU8_VOTINGRIGHTSCLIENTS_DFLT0);
}
960 
/*
 * Revoke all frequency-transition voting rights by zeroing the
 * CG_FREQ_TRAN_VOTING_0 SMC register (inverse of
 * smu8_program_voting_clients()).
 */
static void smu8_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixCG_FREQ_TRAN_VOTING_0, 0);
}
966 
/*
 * Enable SCLK DPM: record the enablement in the cached dpm_flags, then
 * ask the SMU to turn the SCLK DPM feature on.
 *
 * Returns the SMU message result.
 */
static int smu8_start_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	data->dpm_flags |= DPMFlags_SCLK_Enabled;

	return smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_EnableAllSmuFeatures,
				SCLK_DPM_MASK,
				NULL);
}
978 
smu8_stop_dpm(struct pp_hwmgr * hwmgr)979 static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
980 {
981 	int ret = 0;
982 	struct smu8_hwmgr *data = hwmgr->backend;
983 	unsigned long dpm_features = 0;
984 
985 	if (data->dpm_flags & DPMFlags_SCLK_Enabled) {
986 		dpm_features |= SCLK_DPM_MASK;
987 		data->dpm_flags &= ~DPMFlags_SCLK_Enabled;
988 		ret = smum_send_msg_to_smc_with_parameter(hwmgr,
989 					PPSMC_MSG_DisableAllSmuFeatures,
990 					dpm_features,
991 					NULL);
992 	}
993 	return ret;
994 }
995 
/*
 * Pin the SCLK DPM soft range to the bootup engine clock: both the
 * cached soft min/max and the SMU's SetSclkSoftMin/SetSclkSoftMax
 * messages are programmed with the bootup value, so only one SCLK
 * level is reachable.
 */
static int smu8_program_bootup_state(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	data->sclk_dpm.soft_min_clk = data->sys_info.bootup_engine_clock;
	data->sclk_dpm.soft_max_clk = data->sys_info.bootup_engine_clock;

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMin,
				smu8_get_sclk_level(hwmgr,
				data->sclk_dpm.soft_min_clk,
				PPSMC_MSG_SetSclkSoftMin),
				NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMax,
				smu8_get_sclk_level(hwmgr,
				data->sclk_dpm.soft_max_clk,
				PPSMC_MSG_SetSclkSoftMax),
				NULL);

	return 0;
}
1019 
smu8_reset_acp_boot_level(struct pp_hwmgr * hwmgr)1020 static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
1021 {
1022 	struct smu8_hwmgr *data = hwmgr->backend;
1023 
1024 	data->acp_boot_level = 0xff;
1025 }
1026 
smu8_populate_umdpstate_clocks(struct pp_hwmgr * hwmgr)1027 static void smu8_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
1028 {
1029 	struct phm_clock_voltage_dependency_table *table =
1030 				hwmgr->dyn_state.vddc_dependency_on_sclk;
1031 
1032 	hwmgr->pstate_sclk = table->entries[0].clk / 100;
1033 	hwmgr->pstate_mclk = 0;
1034 
1035 	hwmgr->pstate_sclk_peak = table->entries[table->count - 1].clk / 100;
1036 	hwmgr->pstate_mclk_peak = 0;
1037 }
1038 
/*
 * Bring up dynamic power management: grant voting rights, start SCLK
 * DPM on the SMU, program the bootup SCLK range, invalidate the ACP
 * boot level and publish the UMD pstate clocks.
 *
 * Returns -EINVAL if the SMU refuses to start SCLK DPM.
 */
static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	smu8_program_voting_clients(hwmgr);
	if (smu8_start_dpm(hwmgr))
		return -EINVAL;
	smu8_program_bootup_state(hwmgr);
	smu8_reset_acp_boot_level(hwmgr);

	smu8_populate_umdpstate_clocks(hwmgr);

	return 0;
}
1051 
smu8_disable_dpm_tasks(struct pp_hwmgr * hwmgr)1052 static int smu8_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
1053 {
1054 	smu8_disable_nb_dpm(hwmgr);
1055 
1056 	smu8_clear_voting_clients(hwmgr);
1057 	if (smu8_stop_dpm(hwmgr))
1058 		return -EINVAL;
1059 
1060 	return 0;
1061 }
1062 
/*
 * Prepare the ASIC for power-off: tear down DPM, then reset the cached
 * display-clock-bypass, NB DPM and CC6 bookkeeping so a later power-on
 * starts from a clean state.
 */
static int smu8_power_off_asic(struct pp_hwmgr *hwmgr)
{
	smu8_disable_dpm_tasks(hwmgr);
	smu8_power_up_display_clock_sys_pll(hwmgr);
	smu8_clear_nb_dpm_flag(hwmgr);
	smu8_reset_cc6_data(hwmgr);
	return 0;
}
1071 
/*
 * Adjust the requested power state before it is committed: pick the
 * target memory clock, decide whether the NB pstate must be forced
 * high, and record the resulting FORCE_HIGH / CANCEL_FORCE_HIGH /
 * DO_NOTHING action that smu8_update_low_mem_pstate() consumes later.
 *
 * Returns 0 on success, -EINVAL if either hardware state is invalid.
 */
static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				struct pp_power_state  *prequest_ps,
			const struct pp_power_state *pcurrent_ps)
{
	struct smu8_power_state *smu8_ps;
	const struct smu8_power_state *smu8_current_ps;
	struct smu8_hwmgr *data = hwmgr->backend;
	struct PP_Clocks clocks = {0, 0, 0, 0};
	bool force_high;

	smu8_ps = cast_smu8_power_state(&prequest_ps->hardware);
	smu8_current_ps = cast_const_smu8_power_state(&pcurrent_ps->hardware);

	if (!smu8_ps || !smu8_current_ps)
		return -EINVAL;

	smu8_ps->need_dfs_bypass = true;

	data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);

	/* use the display-requested memory clock when set, otherwise
	 * fall back to the second NB pstate memory clock entry */
	clocks.memoryClock = hwmgr->display_config->min_mem_set_clock != 0 ?
				hwmgr->display_config->min_mem_set_clock :
				data->sys_info.nbp_memory_clock[1];


	/* stable pstate overrides the above with the max AC mclk */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
		clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;

	/* force high when the target mclk exceeds the last NB pstate
	 * memory clock entry, or when three or more displays are on */
	force_high = (clocks.memoryClock > data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1])
			|| (hwmgr->display_config->num_display >= 3);

	/* start from the action of the current state */
	smu8_ps->action = smu8_current_ps->action;

	if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
		smu8_nbdpm_pstate_enable_disable(hwmgr, false, false);
	else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD)
		smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
	else if (!force_high && (smu8_ps->action == FORCE_HIGH))
		smu8_ps->action = CANCEL_FORCE_HIGH;
	else if (force_high && (smu8_ps->action != FORCE_HIGH))
		smu8_ps->action = FORCE_HIGH;
	else
		smu8_ps->action = DO_NOTHING;

	return 0;
}
1118 
smu8_hwmgr_backend_init(struct pp_hwmgr * hwmgr)1119 static int smu8_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
1120 {
1121 	int result = 0;
1122 	struct smu8_hwmgr *data;
1123 
1124 	data = kzalloc_obj(struct smu8_hwmgr);
1125 	if (data == NULL)
1126 		return -ENOMEM;
1127 
1128 	hwmgr->backend = data;
1129 
1130 	result = smu8_initialize_dpm_defaults(hwmgr);
1131 	if (result != 0) {
1132 		pr_err("smu8_initialize_dpm_defaults failed\n");
1133 		return result;
1134 	}
1135 
1136 	result = smu8_get_system_info_data(hwmgr);
1137 	if (result != 0) {
1138 		pr_err("smu8_get_system_info_data failed\n");
1139 		return result;
1140 	}
1141 
1142 	smu8_construct_boot_state(hwmgr);
1143 
1144 	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =  SMU8_MAX_HARDWARE_POWERLEVELS;
1145 
1146 	return result;
1147 }
1148 
smu8_hwmgr_backend_fini(struct pp_hwmgr * hwmgr)1149 static int smu8_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
1150 {
1151 	if (hwmgr != NULL) {
1152 		kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
1153 		hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
1154 
1155 		kfree(hwmgr->backend);
1156 		hwmgr->backend = NULL;
1157 	}
1158 	return 0;
1159 }
1160 
/*
 * Pin SCLK to its highest level: BOTH the SMU soft minimum and soft
 * maximum are programmed with the cached soft_max_clk (passing
 * soft_max_clk to SetSclkSoftMin is intentional — it collapses the
 * soft range onto the top level).
 */
static int smu8_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSclkSoftMin,
					smu8_get_sclk_level(hwmgr,
					data->sclk_dpm.soft_max_clk,
					PPSMC_MSG_SetSclkSoftMin),
					NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMax,
				smu8_get_sclk_level(hwmgr,
				data->sclk_dpm.soft_max_clk,
				PPSMC_MSG_SetSclkSoftMax),
				NULL);

	return 0;
}
1181 
/*
 * Restore the full SCLK DPM range: min clocks come from the first
 * entry of the vddc/sclk dependency table, max clocks from the entry
 * selected by smu8_get_max_sclk_level() (clamped to the table end),
 * then both soft limits are reprogrammed on the SMU.
 *
 * Returns -EINVAL when the dependency table is missing or empty.
 */
static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_clock_voltage_dependency_table *table =
				hwmgr->dyn_state.vddc_dependency_on_sclk;
	unsigned long clock = 0, level;

	if (NULL == table || table->count <= 0)
		return -EINVAL;

	data->sclk_dpm.soft_min_clk = table->entries[0].clk;
	data->sclk_dpm.hard_min_clk = table->entries[0].clk;

	level = smu8_get_max_sclk_level(hwmgr) - 1;

	/* clamp the max level to the last table entry */
	if (level < table->count)
		clock = table->entries[level].clk;
	else
		clock = table->entries[table->count - 1].clk;

	data->sclk_dpm.soft_max_clk = clock;
	data->sclk_dpm.hard_max_clk = clock;

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMin,
				smu8_get_sclk_level(hwmgr,
				data->sclk_dpm.soft_min_clk,
				PPSMC_MSG_SetSclkSoftMin),
				NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMax,
				smu8_get_sclk_level(hwmgr,
				data->sclk_dpm.soft_max_clk,
				PPSMC_MSG_SetSclkSoftMax),
				NULL);

	return 0;
}
1221 
/*
 * Pin SCLK to its lowest level: both SMU soft limits are programmed
 * with the cached soft_min_clk.  The soft maximum is dropped first,
 * then the soft minimum, collapsing the range onto the bottom level.
 */
static int smu8_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetSclkSoftMax,
			smu8_get_sclk_level(hwmgr,
			data->sclk_dpm.soft_min_clk,
			PPSMC_MSG_SetSclkSoftMax),
			NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMin,
				smu8_get_sclk_level(hwmgr,
				data->sclk_dpm.soft_min_clk,
				PPSMC_MSG_SetSclkSoftMin),
				NULL);

	return 0;
}
1242 
smu8_dpm_force_dpm_level(struct pp_hwmgr * hwmgr,enum amd_dpm_forced_level level)1243 static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
1244 				enum amd_dpm_forced_level level)
1245 {
1246 	int ret = 0;
1247 
1248 	switch (level) {
1249 	case AMD_DPM_FORCED_LEVEL_HIGH:
1250 	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1251 		ret = smu8_phm_force_dpm_highest(hwmgr);
1252 		break;
1253 	case AMD_DPM_FORCED_LEVEL_LOW:
1254 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1255 	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1256 		ret = smu8_phm_force_dpm_lowest(hwmgr);
1257 		break;
1258 	case AMD_DPM_FORCED_LEVEL_AUTO:
1259 		ret = smu8_phm_unforce_dpm_levels(hwmgr);
1260 		break;
1261 	case AMD_DPM_FORCED_LEVEL_MANUAL:
1262 	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1263 	default:
1264 		break;
1265 	}
1266 
1267 	return ret;
1268 }
1269 
smu8_dpm_powerdown_uvd(struct pp_hwmgr * hwmgr)1270 static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
1271 {
1272 	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
1273 		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF, NULL);
1274 	return 0;
1275 }
1276 
smu8_dpm_powerup_uvd(struct pp_hwmgr * hwmgr)1277 static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
1278 {
1279 	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
1280 		return smum_send_msg_to_smc_with_parameter(
1281 			hwmgr,
1282 			PPSMC_MSG_UVDPowerON,
1283 			PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0,
1284 			NULL);
1285 	}
1286 
1287 	return 0;
1288 }
1289 
/*
 * Program the VCE (ECLK) DPM limits.  With a stable pstate or UMD
 * pstate active, the ECLK hard minimum is raised to the highest entry
 * of the VCE dependency table; otherwise the hard minimum is released
 * and ECLK DPM level 0 is excluded via the soft minimum.
 */
static int  smu8_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_vce_clock_voltage_dependency_table *ptable =
		hwmgr->dyn_state.vce_clock_voltage_dependency_table;

	/* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
	if (PP_CAP(PHM_PlatformCaps_StablePState) ||
	    hwmgr->en_umd_pstate) {
		data->vce_dpm.hard_min_clk =
				  ptable->entries[ptable->count - 1].ecclk;

		smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetEclkHardMin,
			smu8_get_eclk_level(hwmgr,
				data->vce_dpm.hard_min_clk,
				PPSMC_MSG_SetEclkHardMin),
			NULL);
	} else {

		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetEclkHardMin,
					0,
					NULL);
		/* disable ECLK DPM 0. Otherwise VCE could hang if
		 * switching SCLK from DPM 0 to 6/7 */
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetEclkSoftMin,
					1,
					NULL);
	}
	return 0;
}
1323 
smu8_dpm_powerdown_vce(struct pp_hwmgr * hwmgr)1324 static int smu8_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
1325 {
1326 	if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
1327 		return smum_send_msg_to_smc(hwmgr,
1328 					    PPSMC_MSG_VCEPowerOFF,
1329 					    NULL);
1330 	return 0;
1331 }
1332 
smu8_dpm_powerup_vce(struct pp_hwmgr * hwmgr)1333 static int smu8_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
1334 {
1335 	if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
1336 		return smum_send_msg_to_smc(hwmgr,
1337 					    PPSMC_MSG_VCEPowerON,
1338 					    NULL);
1339 	return 0;
1340 }
1341 
smu8_dpm_get_mclk(struct pp_hwmgr * hwmgr,bool low)1342 static uint32_t smu8_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
1343 {
1344 	struct smu8_hwmgr *data = hwmgr->backend;
1345 
1346 	return data->sys_info.bootup_uma_clock;
1347 }
1348 
smu8_dpm_get_sclk(struct pp_hwmgr * hwmgr,bool low)1349 static uint32_t smu8_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
1350 {
1351 	struct pp_power_state  *ps;
1352 	struct smu8_power_state  *smu8_ps;
1353 
1354 	if (hwmgr == NULL)
1355 		return -EINVAL;
1356 
1357 	ps = hwmgr->request_ps;
1358 
1359 	if (ps == NULL)
1360 		return -EINVAL;
1361 
1362 	smu8_ps = cast_smu8_power_state(&ps->hardware);
1363 
1364 	if (low)
1365 		return smu8_ps->levels[0].engineClock;
1366 	else
1367 		return smu8_ps->levels[smu8_ps->level-1].engineClock;
1368 }
1369 
smu8_dpm_patch_boot_state(struct pp_hwmgr * hwmgr,struct pp_hw_power_state * hw_ps)1370 static int smu8_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
1371 					struct pp_hw_power_state *hw_ps)
1372 {
1373 	struct smu8_hwmgr *data = hwmgr->backend;
1374 	struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);
1375 
1376 	smu8_ps->level = 1;
1377 	smu8_ps->nbps_flags = 0;
1378 	smu8_ps->bapm_flags = 0;
1379 	smu8_ps->levels[0] = data->boot_power_level;
1380 
1381 	return 0;
1382 }
1383 
/*
 * Per-level callback invoked by pp_tables_get_entry() while parsing a
 * powerplay table entry: translate one ATOM clock-info record into a
 * smu8 performance level (engine clock + vddc index) and grow the
 * state's level count.
 *
 * The clock-info index is clamped to the last supported hardware
 * performance level before indexing the vddc/sclk table.
 */
static int smu8_dpm_get_pp_table_entry_callback(
						     struct pp_hwmgr *hwmgr,
					   struct pp_hw_power_state *hw_ps,
							  unsigned int index,
						     const void *clock_info)
{
	struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);

	const ATOM_PPLIB_CZ_CLOCK_INFO *smu8_clock_info = clock_info;

	struct phm_clock_voltage_dependency_table *table =
				    hwmgr->dyn_state.vddc_dependency_on_sclk;
	uint8_t clock_info_index = smu8_clock_info->index;

	/* clamp to the last valid hardware performance level */
	if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1))
		clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1);

	smu8_ps->levels[index].engineClock = table->entries[clock_info_index].clk;
	smu8_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v;

	/* levels arrive in order, so the count is always index + 1 */
	smu8_ps->level = index + 1;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
		smu8_ps->levels[index].dsDividerIndex = 5;
		smu8_ps->levels[index].ssDividerIndex = 5;
	}

	return 0;
}
1413 
/*
 * Number of power-state entries in the powerplay table, or 0 when the
 * table cannot be read.
 */
static int smu8_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
{
	unsigned long count = 0;

	if (pp_tables_get_num_of_entries(hwmgr, &count))
		return 0;

	return count;
}
1423 
/*
 * Fill @ps from powerplay table entry @entry.  The state's magic must
 * be set before pp_tables_get_entry() runs so the per-level callback
 * can validate the cast; afterwards the UVD clocks parsed into the
 * generic state are mirrored into the smu8-specific fields.
 *
 * Returns the result of pp_tables_get_entry().
 */
static int smu8_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
		    unsigned long entry, struct pp_power_state *ps)
{
	int result;
	struct smu8_power_state *smu8_ps;

	ps->hardware.magic = smu8_magic;

	smu8_ps = cast_smu8_power_state(&(ps->hardware));

	result = pp_tables_get_entry(hwmgr, entry, ps,
			smu8_dpm_get_pp_table_entry_callback);

	smu8_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
	smu8_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;

	return result;
}
1442 
smu8_get_power_state_size(struct pp_hwmgr * hwmgr)1443 static int smu8_get_power_state_size(struct pp_hwmgr *hwmgr)
1444 {
1445 	return sizeof(struct smu8_power_state);
1446 }
1447 
/* Dump the cached CC6 display configuration to the powerplay debug log. */
static void smu8_hw_print_display_cfg(
	const struct cc6_settings *cc6_settings)
{
	PP_DBG_LOG("New Display Configuration:\n");

	PP_DBG_LOG("   cpu_cc6_disable: %d\n",
			cc6_settings->cpu_cc6_disable);
	PP_DBG_LOG("   cpu_pstate_disable: %d\n",
			cc6_settings->cpu_pstate_disable);
	PP_DBG_LOG("   nb_pstate_switch_disable: %d\n",
			cc6_settings->nb_pstate_switch_disable);
	PP_DBG_LOG("   cpu_pstate_separation_time: %d\n\n",
			cc6_settings->cpu_pstate_separation_time);
}
1462 
/*
 * Push the cached CC6 settings to the SMU, but only when
 * smu8_store_cc6_data() marked them as changed.  The separation time
 * and the two disable bits are packed into a single
 * SetDisplaySizePowerParams message parameter.
 */
 static int smu8_set_cpu_power_state(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *hw_data = hwmgr->backend;
	uint32_t data = 0;

	if (hw_data->cc6_settings.cc6_setting_changed) {

		/* consume the pending change before reprogramming */
		hw_data->cc6_settings.cc6_setting_changed = false;

		smu8_hw_print_display_cfg(&hw_data->cc6_settings);

		data |= (hw_data->cc6_settings.cpu_pstate_separation_time
			& PWRMGT_SEPARATION_TIME_MASK)
			<< PWRMGT_SEPARATION_TIME_SHIFT;

		data |= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0)
			<< PWRMGT_DISABLE_CPU_CSTATES_SHIFT;

		data |= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0)
			<< PWRMGT_DISABLE_CPU_PSTATES_SHIFT;

		PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n",
			data);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetDisplaySizePowerParams,
						data,
						NULL);
	}

	return 0;
}
1495 
1496 
/*
 * Cache the CC6 (CPU idle) parameters requested by the display stack.
 * The changed flag is raised only when at least one value differs,
 * so smu8_set_cpu_power_state() reprograms the SMU only when needed.
 */
static int smu8_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
			bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
{
	struct smu8_hwmgr *hw_data = hwmgr->backend;
	struct cc6_settings *cc6 = &hw_data->cc6_settings;

	if (separation_time == cc6->cpu_pstate_separation_time &&
	    cc6_disable == cc6->cpu_cc6_disable &&
	    pstate_disable == cc6->cpu_pstate_disable &&
	    pstate_switch_disable == cc6->nb_pstate_switch_disable)
		return 0;

	cc6->cc6_setting_changed = true;
	cc6->cpu_pstate_separation_time = separation_time;
	cc6->cpu_cc6_disable = cc6_disable;
	cc6->cpu_pstate_disable = pstate_disable;
	cc6->nb_pstate_switch_disable = pstate_switch_disable;

	return 0;
}
1523 
smu8_get_dal_power_level(struct pp_hwmgr * hwmgr,struct amd_pp_simple_clock_info * info)1524 static int smu8_get_dal_power_level(struct pp_hwmgr *hwmgr,
1525 		struct amd_pp_simple_clock_info *info)
1526 {
1527 	uint32_t i;
1528 	const struct phm_clock_voltage_dependency_table *table =
1529 			hwmgr->dyn_state.vddc_dep_on_dal_pwrl;
1530 	const struct phm_clock_and_voltage_limits *limits =
1531 			&hwmgr->dyn_state.max_clock_voltage_on_ac;
1532 
1533 	info->engine_max_clock = limits->sclk;
1534 	info->memory_max_clock = limits->mclk;
1535 
1536 	for (i = table->count - 1; i > 0; i--) {
1537 		if (limits->vddc >= table->entries[i].v) {
1538 			info->level = table->entries[i].clk;
1539 			return 0;
1540 		}
1541 	}
1542 	return -EINVAL;
1543 }
1544 
/*
 * Force the SCLK soft range: the same @mask value is programmed as
 * both the soft minimum and the soft maximum level.  Other clock
 * types are ignored.
 */
static int smu8_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask)
{
	if (type != PP_SCLK)
		return 0;

	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetSclkSoftMin,
			mask,
			NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetSclkSoftMax,
			mask,
			NULL);

	return 0;
}
1565 
/*
 * Emit the available SCLK or MCLK levels into a sysfs buffer at
 * *offset, marking the level currently selected by hardware (read
 * from TARGET_AND_CURRENT_PROFILE_INDEX) with '*'.  MCLK entries are
 * emitted in reverse table order.  *offset is advanced past the
 * emitted text.
 */
static int smu8_emit_clock_levels(struct pp_hwmgr *hwmgr,
				  enum pp_clock_type type, char *buf,
				  int *offset)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_clock_voltage_dependency_table *sclk_table =
			hwmgr->dyn_state.vddc_dependency_on_sclk;
	uint32_t i, now;
	int size = *offset;

	switch (type) {
	case PP_SCLK:
		now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC,
				ixTARGET_AND_CURRENT_PROFILE_INDEX),
				TARGET_AND_CURRENT_PROFILE_INDEX,
				CURR_SCLK_INDEX);

		for (i = 0; i < sclk_table->count; i++)
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
					      sclk_table->entries[i].clk / 100,
					      (i == now) ? "*" : "");
		break;
	case PP_MCLK:
		now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC,
				ixTARGET_AND_CURRENT_PROFILE_INDEX),
				TARGET_AND_CURRENT_PROFILE_INDEX,
				CURR_MCLK_INDEX);

		/* walk nbp_memory_clock[] from the last entry down */
		for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
			size += sysfs_emit_at(
				buf, size, "%d: %uMhz %s\n",
				SMU8_NUM_NBPMEMORYCLOCK - i,
				data->sys_info.nbp_memory_clock[i - 1] / 100,
				(SMU8_NUM_NBPMEMORYCLOCK - i == now) ? "*" :
								       "");
		break;
	default:
		break;
	}

	*offset = size;

	return 0;
}
1612 
/*
 * Describe performance level @index of a power state: engine clock,
 * memory clock and vddc.  @index is clamped to the state's highest
 * level.  For the PowerContainment designation, the first level above
 * the DCE slow-sclk threshold overrides the engine clock.
 *
 * Returns 0 on success, -EINVAL on a NULL argument.
 */
static int smu8_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
				PHM_PerformanceLevelDesignation designation, uint32_t index,
				PHM_PerformanceLevel *level)
{
	const struct smu8_power_state *ps;
	struct smu8_hwmgr *data;
	uint32_t level_index;
	uint32_t i;

	if (level == NULL || hwmgr == NULL || state == NULL)
		return -EINVAL;

	data = hwmgr->backend;
	ps = cast_const_smu8_power_state(state);

	/* clamp the requested index to the state's last level */
	level_index = index > ps->level - 1 ? ps->level - 1 : index;
	level->coreClock = ps->levels[level_index].engineClock;

	if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
		/* pick the first level above the DCE slow-sclk threshold */
		for (i = 1; i < ps->level; i++) {
			if (ps->levels[i].engineClock > data->dce_slow_sclk_threshold) {
				level->coreClock = ps->levels[i].engineClock;
				break;
			}
		}
	}

	/* level 0 reports the last NB pstate memory clock entry,
	 * all other levels report the first entry */
	if (level_index == 0)
		level->memory_clock = data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1];
	else
		level->memory_clock = data->sys_info.nbp_memory_clock[0];

	/* convert the 8-bit vddc index to a voltage, rounding to 1/4 units */
	level->vddc = (smu8_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4;
	level->nonLocalMemoryFreq = 0;
	level->nonLocalMemoryWidth = 0;

	return 0;
}
1651 
smu8_get_current_shallow_sleep_clocks(struct pp_hwmgr * hwmgr,const struct pp_hw_power_state * state,struct pp_clock_info * clock_info)1652 static int smu8_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
1653 	const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
1654 {
1655 	const struct smu8_power_state *ps = cast_const_smu8_power_state(state);
1656 
1657 	clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex));
1658 	clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex));
1659 
1660 	return 0;
1661 }
1662 
/*
 * List the available clocks of the requested type (values in 10x the
 * table unit).  Display and sys clocks use the max-SCLK-level count;
 * memory clocks always report SMU8_NUM_NBPMEMORYCLOCK entries in
 * reverse table order.
 *
 * Returns 0 on success, -1 for an unsupported clock type.
 */
static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
						struct amd_pp_clocks *clocks)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	int i;
	struct phm_clock_voltage_dependency_table *table;

	clocks->count = smu8_get_max_sclk_level(hwmgr);
	switch (type) {
	case amd_pp_disp_clock:
		for (i = 0; i < clocks->count; i++)
			clocks->clock[i] = data->sys_info.display_clock[i] * 10;
		break;
	case amd_pp_sys_clock:
		table = hwmgr->dyn_state.vddc_dependency_on_sclk;
		for (i = 0; i < clocks->count; i++)
			clocks->clock[i] = table->entries[i].clk * 10;
		break;
	case amd_pp_mem_clock:
		clocks->count = SMU8_NUM_NBPMEMORYCLOCK;
		/* report nbp_memory_clock[] in reverse order */
		for (i = 0; i < clocks->count; i++)
			clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i] * 10;
		break;
	default:
		return -1;
	}

	return 0;
}
1692 
smu8_get_max_high_clocks(struct pp_hwmgr * hwmgr,struct amd_pp_simple_clock_info * clocks)1693 static int smu8_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
1694 {
1695 	struct phm_clock_voltage_dependency_table *table =
1696 					hwmgr->dyn_state.vddc_dependency_on_sclk;
1697 	unsigned long level;
1698 	const struct phm_clock_and_voltage_limits *limits =
1699 			&hwmgr->dyn_state.max_clock_voltage_on_ac;
1700 
1701 	if ((NULL == table) || (table->count <= 0) || (clocks == NULL))
1702 		return -EINVAL;
1703 
1704 	level = smu8_get_max_sclk_level(hwmgr) - 1;
1705 
1706 	if (level < table->count)
1707 		clocks->engine_max_clock = table->entries[level].clk;
1708 	else
1709 		clocks->engine_max_clock = table->entries[table->count - 1].clk;
1710 
1711 	clocks->memory_max_clock = limits->mclk;
1712 
1713 	return 0;
1714 }
1715 
smu8_thermal_get_temperature(struct pp_hwmgr * hwmgr)1716 static int smu8_thermal_get_temperature(struct pp_hwmgr *hwmgr)
1717 {
1718 	int actual_temp = 0;
1719 	uint32_t val = cgs_read_ind_register(hwmgr->device,
1720 					     CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP);
1721 	uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
1722 
1723 	if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
1724 		actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1725 	else
1726 		actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1727 
1728 	return actual_temp;
1729 }
1730 
/*
 * Read one hardware sensor into *value (always 4 bytes).  Clock
 * sensors are resolved by indexing the dependency tables with the
 * current level read from TARGET_AND_CURRENT_PROFILE_INDEX[_2];
 * voltages come from the SMUSVI current-VID registers; GPU load is
 * queried from the SMU.  Gated UVD/VCE clocks report 0.
 *
 * Returns 0 on success, -EINVAL for a too-small buffer or an
 * out-of-range index, -EIO on an SMU query failure, -EOPNOTSUPP for
 * an unsupported sensor.
 */
static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
			  void *value, int *size)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	struct phm_clock_voltage_dependency_table *table =
				hwmgr->dyn_state.vddc_dependency_on_sclk;

	struct phm_vce_clock_voltage_dependency_table *vce_table =
		hwmgr->dyn_state.vce_clock_voltage_dependency_table;

	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;

	/* current SCLK/UVD/VCE level indices as reported by hardware */
	uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
					TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
	uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
					TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
	uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
					TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);

	uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
	uint16_t vddnb, vddgfx;
	int result;

	/* size must be at least 4 bytes for all sensors */
	if (*size < 4)
		return -EINVAL;
	*size = 4;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		if (sclk_index < NUM_SCLK_LEVELS) {
			sclk = table->entries[sclk_index].clk;
			*((uint32_t *)value) = sclk;
			return 0;
		}
		return -EINVAL;
	case AMDGPU_PP_SENSOR_VDDNB:
		/* convert the 8-bit VID to a voltage, scaled by 1/4 */
		tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
			CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
		vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp) / 4;
		*((uint32_t *)value) = vddnb;
		return 0;
	case AMDGPU_PP_SENSOR_VDDGFX:
		tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
			CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
		vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp) / 4;
		*((uint32_t *)value) = vddgfx;
		return 0;
	case AMDGPU_PP_SENSOR_UVD_VCLK:
		if (!data->uvd_power_gated) {
			if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
				return -EINVAL;
			} else {
				vclk = uvd_table->entries[uvd_index].vclk;
				*((uint32_t *)value) = vclk;
				return 0;
			}
		}
		/* UVD is power gated: report 0 */
		*((uint32_t *)value) = 0;
		return 0;
	case AMDGPU_PP_SENSOR_UVD_DCLK:
		if (!data->uvd_power_gated) {
			if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
				return -EINVAL;
			} else {
				dclk = uvd_table->entries[uvd_index].dclk;
				*((uint32_t *)value) = dclk;
				return 0;
			}
		}
		*((uint32_t *)value) = 0;
		return 0;
	case AMDGPU_PP_SENSOR_VCE_ECCLK:
		if (!data->vce_power_gated) {
			if (vce_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
				return -EINVAL;
			} else {
				ecclk = vce_table->entries[vce_index].ecclk;
				*((uint32_t *)value) = ecclk;
				return 0;
			}
		}
		*((uint32_t *)value) = 0;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		result = smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_GetAverageGraphicsActivity,
				&activity_percent);
		if (0 == result)
			/* cap the SMU-reported activity at 100% */
			activity_percent = activity_percent > 100 ? 100 : activity_percent;
		else
			return -EIO;
		*((uint32_t *)value) = activity_percent;
		return 0;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
		return 0;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = smu8_thermal_get_temperature(hwmgr);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
1840 
/*
 * Tell the SMU where the CAC (power telemetry) buffer lives: address
 * halves first, then the buffer size.
 *
 * NOTE(review): the mc_addr_* arguments feed the *Virtual messages and
 * the virtual_addr_* arguments the *Physical messages — the pairing
 * looks swapped relative to the parameter names, but it is preserved
 * here exactly as written; confirm against the SMU firmware interface
 * before changing it.
 */
static int smu8_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramAddrHiVirtual,
					mc_addr_hi,
					NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramAddrLoVirtual,
					mc_addr_low,
					NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramAddrHiPhysical,
					virtual_addr_hi,
					NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramAddrLoPhysical,
					virtual_addr_low,
					NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramBufferSize,
					size,
					NULL);
	return 0;
}
1871 
smu8_get_thermal_temperature_range(struct pp_hwmgr * hwmgr,struct PP_TemperatureRange * thermal_data)1872 static int smu8_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
1873 		struct PP_TemperatureRange *thermal_data)
1874 {
1875 	struct smu8_hwmgr *data = hwmgr->backend;
1876 
1877 	memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
1878 
1879 	thermal_data->max = (data->thermal_auto_throttling_treshold +
1880 			data->sys_info.htc_hyst_lmt) *
1881 			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1882 
1883 	return 0;
1884 }
1885 
/*
 * Enable or disable UVD DPM in the SMU.
 *
 * DPM is only actually enabled when both the caller requests it and the
 * platform advertises PHM_PlatformCaps_UVDDPM; any other combination
 * disables it.  The cached dpm_flags mirror the requested state.
 *
 * Always returns 0; the SMC message result is ignored.
 */
static int smu8_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	bool enable_dpm = enable &&
		phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDDPM);

	if (enable_dpm)
		data->dpm_flags |= DPMFlags_UVD_Enabled;
	else
		data->dpm_flags &= ~DPMFlags_UVD_Enabled;

	smum_send_msg_to_smc_with_parameter(hwmgr,
			enable_dpm ? PPSMC_MSG_EnableAllSmuFeatures
				   : PPSMC_MSG_DisableAllSmuFeatures,
			UVD_DPM_MASK,
			NULL);

	return 0;
}
1910 
/*
 * Reconfigure UVD DPM around a power-gating transition.
 *
 * When gating (@bgate true) UVD DPM is simply disabled.  When ungating,
 * and a stable/UMD pstate is requested, the UVD hard minimum clock is
 * first pinned to the highest entry of the dependency table; DPM is then
 * (re)enabled in either case.
 *
 * Always returns 0.
 */
static int smu8_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_uvd_clock_voltage_dependency_table *ptable =
		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;

	if (bgate) {
		smu8_enable_disable_uvd_dpm(hwmgr, false);
		return 0;
	}

	if (PP_CAP(PHM_PlatformCaps_StablePState) || hwmgr->en_umd_pstate) {
		/* Pin the hard minimum to the table's top vclk before
		 * enabling DPM.
		 */
		data->uvd_dpm.hard_min_clk =
			ptable->entries[ptable->count - 1].vclk;

		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetUvdHardMin,
				smu8_get_uvd_level(hwmgr,
						   data->uvd_dpm.hard_min_clk,
						   PPSMC_MSG_SetUvdHardMin),
				NULL);
	}

	smu8_enable_disable_uvd_dpm(hwmgr, true);

	return 0;
}
1941 
/*
 * Enable or disable VCE DPM in the SMU.
 *
 * DPM is only actually enabled when both the caller requests it and the
 * platform advertises PHM_PlatformCaps_VCEDPM; any other combination
 * disables it.  The cached dpm_flags mirror the requested state.
 *
 * Always returns 0; the SMC message result is ignored.
 */
static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	bool enable_dpm = enable &&
		phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_VCEDPM);

	if (enable_dpm)
		data->dpm_flags |= DPMFlags_VCE_Enabled;
	else
		data->dpm_flags &= ~DPMFlags_VCE_Enabled;

	smum_send_msg_to_smc_with_parameter(hwmgr,
			enable_dpm ? PPSMC_MSG_EnableAllSmuFeatures
				   : PPSMC_MSG_DisableAllSmuFeatures,
			VCE_DPM_MASK,
			NULL);

	return 0;
}
1967 
1968 
/*
 * Power-gate or ungate the ACP (audio co-processor) block via the SMU.
 *
 * Fix: the early-return guard compares against the cached
 * data->acp_power_gated, but this function previously never updated that
 * field (and no caller in this file does), so the cache went stale after
 * the first transition and subsequent requests were wrongly skipped or
 * repeated.  Record the new state once the message has been sent.
 */
static void smu8_dpm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	/* No-op if ACP is already in the requested state. */
	if (data->acp_power_gated == bgate)
		return;

	if (bgate)
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL);
	else
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON, NULL);

	data->acp_power_gated = bgate;
}
1981 
1982 #define WIDTH_4K		3840
1983 
/*
 * Power-gate or ungate the UVD block.
 *
 * The call order is deliberate and asymmetric: gating applies PG then CG,
 * updates UVD DPM, then powers the block down; ungating powers it up first
 * and then unwinds in the reverse order.  Do not reorder.
 */
static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct amdgpu_device *adev = hwmgr->adev;

	data->uvd_power_gated = bgate;

	if (bgate) {
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_UVD,
						AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_UVD,
						AMD_CG_STATE_GATE);
		smu8_dpm_update_uvd_dpm(hwmgr, true);
		smu8_dpm_powerdown_uvd(hwmgr);
	} else {
		smu8_dpm_powerup_uvd(hwmgr);
		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_UVD,
						AMD_CG_STATE_UNGATE);
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_UVD,
						AMD_PG_STATE_UNGATE);
		smu8_dpm_update_uvd_dpm(hwmgr, false);
	}

	/* enable/disable Low Memory PState for UVD (4k videos) */
	if (adev->asic_type == CHIP_STONEY &&
	    adev->uvd.decode_image_width >= WIDTH_4K)
		smu8_nbdpm_pstate_enable_disable(hwmgr,
						 bgate,
						 true);
}
2018 
/*
 * Power-gate or ungate the VCE block.
 *
 * As with UVD, the sequences are order-critical mirrors of each other:
 * gating applies PG then CG, stops VCE DPM and powers down; ungating
 * powers up first and unwinds in reverse before re-enabling VCE DPM.
 * vce_power_gated is updated at a different point in each branch --
 * after powerdown when gating, immediately after powerup when ungating.
 */
static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	if (bgate) {
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_VCE,
					AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_VCE,
					AMD_CG_STATE_GATE);
		smu8_enable_disable_vce_dpm(hwmgr, false);
		smu8_dpm_powerdown_vce(hwmgr);
		data->vce_power_gated = true;
	} else {
		smu8_dpm_powerup_vce(hwmgr);
		data->vce_power_gated = false;
		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_VCE,
					AMD_CG_STATE_UNGATE);
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_VCE,
					AMD_PG_STATE_UNGATE);
		smu8_dpm_update_vce_dpm(hwmgr);
		smu8_enable_disable_vce_dpm(hwmgr, true);
	}
}
2046 
/*
 * Callback table wiring the SMU8 implementations into the generic
 * powerplay hardware-manager interface (struct pp_hwmgr_func).
 */
static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
	.backend_init = smu8_hwmgr_backend_init,
	.backend_fini = smu8_hwmgr_backend_fini,
	.apply_state_adjust_rules = smu8_apply_state_adjust_rules,
	.force_dpm_level = smu8_dpm_force_dpm_level,
	.get_power_state_size = smu8_get_power_state_size,
	.powergate_uvd = smu8_dpm_powergate_uvd,
	.powergate_vce = smu8_dpm_powergate_vce,
	.powergate_acp = smu8_dpm_powergate_acp,
	.get_mclk = smu8_dpm_get_mclk,
	.get_sclk = smu8_dpm_get_sclk,
	.patch_boot_state = smu8_dpm_patch_boot_state,
	.get_pp_table_entry = smu8_dpm_get_pp_table_entry,
	.get_num_of_pp_table_entries = smu8_dpm_get_num_of_pp_table_entries,
	.set_cpu_power_state = smu8_set_cpu_power_state,
	.store_cc6_data = smu8_store_cc6_data,
	.force_clock_level = smu8_force_clock_level,
	.emit_clock_levels = smu8_emit_clock_levels,
	.get_dal_power_level = smu8_get_dal_power_level,
	.get_performance_level = smu8_get_performance_level,
	.get_current_shallow_sleep_clocks = smu8_get_current_shallow_sleep_clocks,
	.get_clock_by_type = smu8_get_clock_by_type,
	.get_max_high_clocks = smu8_get_max_high_clocks,
	.read_sensor = smu8_read_sensor,
	.power_off_asic = smu8_power_off_asic,
	.asic_setup = smu8_setup_asic_task,
	.dynamic_state_management_enable = smu8_enable_dpm_tasks,
	.power_state_set = smu8_set_power_state_tasks,
	.dynamic_state_management_disable = smu8_disable_dpm_tasks,
	.notify_cac_buffer_info = smu8_notify_cac_buffer_info,
	.get_thermal_temperature_range = smu8_get_thermal_temperature_range,
};
2079 
smu8_init_function_pointers(struct pp_hwmgr * hwmgr)2080 int smu8_init_function_pointers(struct pp_hwmgr *hwmgr)
2081 {
2082 	hwmgr->hwmgr_func = &smu8_hwmgr_funcs;
2083 	hwmgr->pptable_func = &pptable_funcs;
2084 	return 0;
2085 }
2086