xref: /linux/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c (revision 6f8e98b944735c6c403f2572a6c441e11fe229ed)
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>

#include "smumgr.h"
#include "pp_debug.h"
#include "ci_smumgr.h"
#include "ppsmc.h"
#include "smu7_hwmgr.h"
#include "hardwaremanager.h"
#include "ppatomctrl.h"
#include "cgs_common.h"
#include "atombios.h"
#include "pppcielanes.h"
#include "smu7_smumgr.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "processpptables.h"

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define SMC_RAM_END 0x40000

#define CISLAND_MINIMUM_ENGINE_CLOCK 800
#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
static const struct ci_pt_defaults defaults_hawaii_xt = {
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro = {
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt = {
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_saturn_xt = {
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

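/*
 * Point the SMC indirect-access index register at @smc_addr so the next
 * access to mmSMC_IND_DATA_0 hits that SMC SRAM location.  Addresses must
 * be dword-aligned and below @limit; auto-increment is disabled.
 */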
static int ci_set_smc_sram_address(struct pp_hwmgr *hwmgr,
					uint32_t smc_addr, uint32_t limit)
{
	if ((0 != (3 & smc_addr))
		|| ((smc_addr + 3) >= limit)) {
		pr_err("smc_addr invalid\n");
		return -EINVAL;
	}

	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, smc_addr);
	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
	return 0;
}

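/*
 * Copy @byte_count bytes from @src into SMC SRAM at @smc_start_address,
 * one dword at a time through the indirect data register.  The SMC is
 * big-endian, so each dword is assembled MSB-first; a trailing partial
 * dword is merged with the existing SRAM contents via read-modify-write.
 */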
static int ci_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
				const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
	int result;
	uint32_t data = 0;
	uint32_t original_data;
	uint32_t addr = 0;
	uint32_t extra_shift;

	if ((3 & smc_start_address)
		|| ((smc_start_address + byte_count) >= limit)) {
		pr_err("smc_start_address invalid\n");
		return -EINVAL;
	}

	addr = smc_start_address;

	while (byte_count >= 4) {
		/* Bytes are written into the SMC address space with the MSB first. */
		data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];

		result = ci_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	if (0 != byte_count) {

		data = 0;

		result = ci_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);

		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* Bytes are written into the SMC address space with the MSB first. */
			data = (0x100 * data) + *src++;
			byte_count--;
		}

		data <<= extra_shift;

		data |= (original_data & ~((~0UL) << extra_shift));

		result = ci_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
	}

	return 0;
}

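/*
 * Program the SMC boot jump: write a fixed 4-byte sequence at SRAM
 * address 0 so that, when released from reset, the SMC jumps to the
 * entry point of the loaded firmware (per the function's name; the
 * byte pattern itself is an opaque convention of the SMU7 smumgrs).
 */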
static int ci_program_jump_on_start(struct pp_hwmgr *hwmgr)
{
	static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };

	ci_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);

	return 0;
}

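/*
 * The SMC is considered running when its clock is not gated off and its
 * program counter has moved past early boot (heuristically, PC >= 0x20100).
 */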
static bool ci_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
	return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
			CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
	&& (0x20100 <= cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, ixSMC_PC_C)));
}

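/* Read one dword back from SMC SRAM at @smc_addr (bounds-checked against @limit). */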
static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
				uint32_t *value, uint32_t limit)
{
	int result;

	result = ci_set_smc_sram_address(hwmgr, smc_addr, limit);

	if (result)
		return result;

	*value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
	return 0;
}

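/*
 * Post a message to the SMC mailbox and wait for the response register
 * to change.  A response of 1 means success; anything else is logged
 * but deliberately not propagated, so callers always see 0.
 */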
static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	struct amdgpu_device *adev = hwmgr->adev;
	int ret;

	cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);

	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);

	if (ret != 1)
		dev_info(adev->dev,
			"failed to send message %x ret is %d\n", msg, ret);

	return 0;
}

static int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
					uint16_t msg, uint32_t parameter)
{
	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
	return ci_send_msg_to_smc(hwmgr, msg);
}

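/*
 * Pick the power-tune default set for this ASIC by PCI device ID:
 * Hawaii PRO, Hawaii XT and Saturn XT parts get their dedicated tables;
 * everything else (including all the Bonaire IDs listed) falls back to
 * the Bonaire XT defaults.
 */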
static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t dev_id;

	dev_id = adev->pdev->device;

	switch (dev_id) {
	case 0x67BA:
	case 0x67B1:
		smu_data->power_tune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67B8:
	case 0x66B0:
		smu_data->power_tune_defaults = &defaults_hawaii_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		smu_data->power_tune_defaults = &defaults_saturn_xt;
		break;
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
	default:
		smu_data->power_tune_defaults = &defaults_bonaire_xt;
		break;
	}
}

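/*
 * Look up the voltage of the first entry in @allowed_clock_voltage_table
 * whose clock is >= @clock; if the requested clock exceeds every entry,
 * return the voltage of the highest entry instead.
 */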
static int ci_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
	struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
	uint32_t clock, uint32_t *vol)
{
	uint32_t i = 0;

	if (allowed_clock_voltage_table->count == 0)
		return -EINVAL;

	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
			*vol = allowed_clock_voltage_table->entries[i].v;
			return 0;
		}
	}

	*vol = allowed_clock_voltage_table->entries[i - 1].v;
	return 0;
}

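/*
 * Derive the SPLL register values for the requested engine clock from
 * the VBIOS dividers (reference/post dividers, fractional feedback
 * divider, optional engine spread spectrum) and stash them in the SMC
 * graphics level entry.
 */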
static int ci_calculate_sclk_params(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU7_Discrete_GraphicsLevel *sclk)
{
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct pp_atomctrl_clock_dividers_vi dividers;
	uint32_t spll_func_cntl            = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_3          = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	uint32_t spll_func_cntl_4          = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	uint32_t cg_spll_spread_spectrum   = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t ref_clock;
	uint32_t ref_divider;
	uint32_t fbdiv;
	int result;

	/* get the engine clock dividers for this clock value */
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
			"Error retrieving Engine Clock dividers from VBIOS.",
			return result);

	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
	ref_clock = atomctrl_get_reference_clock(hwmgr);
	ref_divider = 1 + dividers.uc_pll_ref_div;

	/* low 14 bits is fraction and high 12 bits is divider */
	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;

	/* SPLL_FUNC_CNTL setup */
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_REF_DIV, dividers.uc_pll_ref_div);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_PDIV_A,  dividers.uc_pll_post_div);

	/* SPLL_FUNC_CNTL_3 setup */
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
			SPLL_FB_DIV, fbdiv);

	/* set to use fractional accumulation */
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
			SPLL_DITHEN, 1);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
		struct pp_atomctrl_internal_ss_info ss_info;
		uint32_t vco_freq = clock * dividers.uc_pll_post_div;

		if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
				vco_freq, &ss_info)) {
			uint32_t clk_s = ref_clock * 5 /
					(ref_divider * ss_info.speed_spectrum_rate);
			uint32_t clk_v = 4 * ss_info.speed_spectrum_percentage *
					fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
					CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
					CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
			cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
					CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
		}
	}

	sclk->SclkFrequency        = clock;
	sclk->CgSpllFuncCntl3      = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4      = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum   = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
	sclk->SclkDid              = (uint8_t)dividers.pll_post_divider;

	return 0;
}

static void ci_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
				const struct phm_phase_shedding_limits_table *pl,
					uint32_t sclk, uint32_t *p_shed)
{
	unsigned int i;

	/* use the minimum phase shedding */
	*p_shed = 1;

	for (i = 0; i < pl->count; i++) {
		if (sclk < pl->entries[i].Sclk) {
			*p_shed = i;
			break;
		}
	}
}

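/*
 * Find the largest deep-sleep divider ID (a power-of-two shift) that
 * still keeps clock >> id at or above the stutter floor, i.e. the
 * smaller of @clock_insr and CISLAND_MINIMUM_ENGINE_CLOCK.
 */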
static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock,
			uint32_t clock_insr)
{
	uint8_t i;
	uint32_t temp;
	uint32_t min = min_t(uint32_t, clock_insr, CISLAND_MINIMUM_ENGINE_CLOCK);

	if (clock < min) {
		pr_info("Engine clock can't satisfy stutter requirement!\n");
		return 0;
	}
	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
		temp = clock >> i;

		if (temp >= min || i == 0)
			break;
	}
	return i;
}

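/*
 * Fill one SMU7_Discrete_GraphicsLevel: SPLL dividers for @clock, the
 * minimum VDDC from the SCLK/VDDC dependency table, activity and
 * hysteresis settings from the current profile, and the deep-sleep
 * divider when SclkDeepSleep is enabled.  Multi-byte fields are
 * converted to the SMC's endianness at the end.
 */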
static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU7_Discrete_GraphicsLevel *level)
{
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	result = ci_calculate_sclk_params(hwmgr, clock, level);

	/* populate graphics levels */
	result = ci_get_dependency_volt_by_clk(hwmgr,
			hwmgr->dyn_state.vddc_dependency_on_sclk, clock,
			(uint32_t *)(&level->MinVddc));
	if (result) {
		pr_err("vdd_dep_on_sclk table is NULL\n");
		return result;
	}

	level->SclkFrequency = clock;
	level->MinVddcPhases = 1;

	if (data->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_sclk(hwmgr,
				hwmgr->dyn_state.vddc_phase_shed_limits_table,
				clock,
				&level->MinVddcPhases);

	level->ActivityLevel = data->current_profile_setting.sclk_activity;
	level->CcPwrDynRm = 0;
	level->CcPwrDynRm1 = 0;
	level->EnabledForActivity = 0;
	/* this level can be used for throttling. */
	level->EnabledForThrottle = 1;
	level->UpH = data->current_profile_setting.sclk_up_hyst;
	level->DownH = data->current_profile_setting.sclk_down_hyst;
	level->VoltageDownH = 0;
	level->PowerThrottle = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep))
		level->DeepSleepDivId =
				ci_get_sleep_divider_id_from_clock(clock,
						CISLAND_MINIMUM_ENGINE_CLOCK);

	/* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later. */
	level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	if (0 == result) {
		level->MinVddc = PP_HOST_TO_SMC_UL(level->MinVddc * VOLTAGE_SCALE);
		CONVERT_FROM_HOST_TO_SMC_UL(level->MinVddcPhases);
		CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
		CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
		CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
	}

	return result;
}

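/*
 * Populate every SCLK DPM level from the dpm_table and upload the whole
 * GraphicsLevel array into SMC SRAM.  Only level 0 is enabled for
 * activity; the top level gets the high display watermark.
 */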
static int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int result = 0;
	uint32_t array = smu_data->dpm_table_start +
			offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
	uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
			SMU7_MAX_LEVELS_GRAPHICS;
	struct SMU7_Discrete_GraphicsLevel *levels =
			smu_data->smc_state_table.GraphicsLevel;
	uint32_t i;

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		result = ci_populate_single_graphic_level(hwmgr,
				dpm_table->sclk_table.dpm_levels[i].value,
				&levels[i]);
		if (result)
			return result;
		if (i > 1)
			smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
		if (i == (dpm_table->sclk_table.count - 1))
			smu_data->smc_state_table.GraphicsLevel[i].DisplayWatermark =
				PPSMC_DISPLAY_WATERMARK_HIGH;
	}

	smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;

	smu_data->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

	result = ci_copy_bytes_to_smc(hwmgr, array,
				   (u8 *)levels, array_size,
				   SMC_RAM_END);

	return result;
}

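/*
 * The helpers below fill individual words of SMU7_Discrete_PmFuses
 * (load line, TDC limit, TdcWaterfallCtl, fuzzy-fan PWM delta, VID
 * tables, GnbLPML min/max VID and base leakage SIDD);
 * ci_populate_pm_fuses() uploads the assembled structure to the SMC.
 */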
static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;

	smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
	smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
	smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
	smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
	uint16_t tdc_limit;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;

	tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
	smu_data->power_tune_table.TDC_VDDC_PkgLimit =
			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
	smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
			defaults->tdc_vddc_throttle_release_limit_perc;
	smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;

	return 0;
}

static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
	uint32_t temp;

	if (ci_read_smc_sram_dword(hwmgr,
			fuse_table_offset +
			offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
			(uint32_t *)&temp, SMC_RAM_END))
		PP_ASSERT_WITH_CODE(false,
				"Attempt to read PmFuses.DW8 (TdcWaterfallCtl) from SMC Failed!",
				return -EINVAL);
	else
		smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
	uint16_t tmp;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);

	if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
		|| 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
		tmp = hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity;
	else
		tmp = hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;

	smu_data->power_tune_table.FuzzyFan_PwmSetDelta = CONVERT_FROM_HOST_TO_SMC_US(tmp);

	return 0;
}

static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
{
	int i;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
	uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
	uint8_t *hi2_vid = smu_data->power_tune_table.BapmVddCVidHiSidd2;

	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
			    "The CAC Leakage table does not exist!", return -EINVAL);
	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
			    "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
			    "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);

	for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
			hi2_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc3);
		} else {
			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc);
			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Leakage);
		}
	}

	return 0;
}

static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr)
{
	int i;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint8_t *vid = smu_data->power_tune_table.VddCVid;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
		"There should never be more than 8 entries for VddcVid!!!",
		return -EINVAL);

	for (i = 0; i < (int)data->vddc_voltage_table.count; i++)
		vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	u8 *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	smu_data->power_tune_table.GnbLPMLMaxVid = (u8)max;
	smu_data->power_tune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint16_t HiSidd;
	uint16_t LoSidd;
	struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;

	HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
	LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);

	smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
			CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
	smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
			CONVERT_FROM_HOST_TO_SMC_US(LoSidd);

	return 0;
}

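/*
 * If power containment is enabled, locate the PmFuses table through the
 * SMC firmware header, fill it word by word with the helpers above, and
 * write the assembled structure back to SMC SRAM in one shot.
 */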
static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint32_t pm_fuse_table_offset;
	int ret = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		if (ci_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU7_Firmware_Header, PmFuseTable),
				&pm_fuse_table_offset, SMC_RAM_END)) {
			pr_err("Attempt to get pm_fuse_table_offset Failed!\n");
			return -EINVAL;
		}

		/* DW0 - DW3 */
		ret = ci_populate_bapm_vddc_vid_sidd(hwmgr);
		/* DW4 - DW5 */
		ret |= ci_populate_vddc_vid(hwmgr);
		/* DW6 */
		ret |= ci_populate_svi_load_line(hwmgr);
		/* DW7 */
		ret |= ci_populate_tdc_limit(hwmgr);
		/* DW8 */
		ret |= ci_populate_dw8(hwmgr, pm_fuse_table_offset);

		ret |= ci_populate_fuzzy_fan(hwmgr, pm_fuse_table_offset);

		ret |= ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(hwmgr);

		ret |= ci_populate_bapm_vddc_base_leakage_sidd(hwmgr);
		if (ret)
			return ret;

		ret = ci_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
				(uint8_t *)&smu_data->power_tune_table,
				sizeof(struct SMU7_Discrete_PmFuses), SMC_RAM_END);
	}
	return ret;
}

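/*
 * Program the BAPM power-management inputs of the SMC DPM table:
 * default/target TDP, temperature limits, the package power limit from
 * the PPM table when present, and the per-ASIC BAPMTI_R/BAPMTI_RC
 * thermal-coupling matrices from the power-tune defaults.
 */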
static int ci_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
	SMU7_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
	struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
	struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
	const uint16_t *def1, *def2;
	int i, j, k;

	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
	dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));

	dpm_table->DTETjOffset = 0;
	dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
		dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
	} else {
		dpm_table->PPM_PkgPwrLimit = 0;
		dpm_table->PPM_TemperatureLimit = 0;
	}

	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);

	dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
	def1 = defaults->bapmti_r;
	def2 = defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

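/*
 * Translate a voltage table entry into high/low standardized SIDD
 * voltages by matching it against the SCLK/VDDC dependency table and
 * the CAC leakage table; falls back to the raw VDDC value when no CAC
 * data (or no match) is available.
 */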
static int ci_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
		pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
		uint16_t *lo)
{
	uint16_t v_index;
	bool vol_found = false;
	*hi = tab->value * VOLTAGE_SCALE;
	*lo = tab->value * VOLTAGE_SCALE;

	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
			"The SCLK/VDDC Dependency Table does not exist.\n",
			return -EINVAL);

	if (NULL == hwmgr->dyn_state.cac_leakage_table) {
		pr_warn("CAC Leakage Table does not exist, using vddc.\n");
		return 0;
	}

	for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
		if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
			vol_found = true;
			if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
				*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
			} else {
				pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
				*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
			}
			break;
		}
	}

	if (!vol_found) {
		for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
			if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
				vol_found = true;
				if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
					*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
				} else {
					pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.\n");
					*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
				}
				break;
			}
		}

		if (!vol_found)
			pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
	}

	return 0;
}

static int ci_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
		pp_atomctrl_voltage_table_entry *tab,
		SMU7_Discrete_VoltageLevel *smc_voltage_tab)
{
	int result;

	result = ci_get_std_voltage_value_sidd(hwmgr, tab,
			&smc_voltage_tab->StdVoltageHiSidd,
			&smc_voltage_tab->StdVoltageLoSidd);
	if (result) {
		smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
		smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
	}

	smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);

	return 0;
}

static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
			SMU7_Discrete_DpmTable *table)
{
	unsigned int count;
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	table->VddcLevelCount = data->vddc_voltage_table.count;
	for (count = 0; count < table->VddcLevelCount; count++) {
		result = ci_populate_smc_voltage_table(hwmgr,
				&(data->vddc_voltage_table.entries[count]),
				&(table->VddcLevel[count]));
		PP_ASSERT_WITH_CODE(0 == result, "Failed to populate SMC VDDC voltage table", return -EINVAL);

		/* GPIO voltage control */
		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
			table->VddcLevel[count].Smio = (uint8_t) count;
			table->Smio[count] |= data->vddc_voltage_table.entries[count].smio_low;
			table->SmioMaskVddcVid |= data->vddc_voltage_table.entries[count].smio_low;
		} else {
			table->VddcLevel[count].Smio = 0;
		}
	}

	CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);

	return 0;
}

static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
			SMU7_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t count;
	int result;

	table->VddciLevelCount = data->vddci_voltage_table.count;

	for (count = 0; count < table->VddciLevelCount; count++) {
		result = ci_populate_smc_voltage_table(hwmgr,
				&(data->vddci_voltage_table.entries[count]),
				&(table->VddciLevel[count]));
		PP_ASSERT_WITH_CODE(result == 0, "Failed to populate SMC VDDCI voltage table", return -EINVAL);
		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
			table->VddciLevel[count].Smio = (uint8_t) count;
			table->Smio[count] |= data->vddci_voltage_table.entries[count].smio_low;
			table->SmioMaskVddciVid |= data->vddci_voltage_table.entries[count].smio_low;
		} else {
			table->VddciLevel[count].Smio = 0;
		}
	}

	CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);

	return 0;
}

static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
			SMU7_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t count;
	int result;

	table->MvddLevelCount = data->mvdd_voltage_table.count;

	for (count = 0; count < table->MvddLevelCount; count++) {
		result = ci_populate_smc_voltage_table(hwmgr,
				&(data->mvdd_voltage_table.entries[count]),
				&table->MvddLevel[count]);
		PP_ASSERT_WITH_CODE(result == 0, "Failed to populate SMC MVDD voltage table", return -EINVAL);
		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
			table->MvddLevel[count].Smio = (uint8_t) count;
			table->Smio[count] |= data->mvdd_voltage_table.entries[count].smio_low;
			table->SmioMaskMvddVid |= data->mvdd_voltage_table.entries[count].smio_low;
		} else {
			table->MvddLevel[count].Smio = 0;
		}
	}

	CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);

	return 0;
}

static int ci_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
	SMU7_Discrete_DpmTable *table)
{
	int result;

	result = ci_populate_smc_vddc_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate VDDC voltage table to SMC", return -EINVAL);

	result = ci_populate_smc_vdd_ci_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate VDDCI voltage table to SMC", return -EINVAL);

	result = ci_populate_smc_mvdd_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate MVDD voltage table to SMC", return -EINVAL);

	return 0;
}

static int ci_populate_ulv_level(struct pp_hwmgr *hwmgr,
		struct SMU7_Discrete_Ulv *state)
{
	uint32_t voltage_response_time, ulv_voltage;
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	state->CcPwrDynRm = 0;
	state->CcPwrDynRm1 = 0;

	result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
	PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result);

	if (ulv_voltage == 0) {
		data->ulv_supported = false;
		return 0;
	}

	if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		/* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
			state->VddcOffset = 0;
		else
			/* used in SMIO Mode. not implemented for now. this is backup only for CI. */
			state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
	} else {
		/* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
			state->VddcOffsetVid = 0;
		else  /* used in SVI2 Mode */
			state->VddcOffsetVid = (uint8_t)(
					(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
						* VOLTAGE_VID_OFFSET_SCALE2
						/ VOLTAGE_VID_OFFSET_SCALE1);
	}
	state->VddcPhase = 1;

	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
	CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);

	return 0;
}

static int ci_populate_ulv_state(struct pp_hwmgr *hwmgr,
		 SMU7_Discrete_Ulv *ulv_level)
{
	return ci_populate_ulv_level(hwmgr, ulv_level);
}

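/*
 * Fill the PCIe LinkLevel entries (gen speed and encoded lane width per
 * DPM level) and record the PCIe DPM enable mask.  One extra index past
 * pcie_speed_table.count is reserved for the PCIE boot level.
 */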
static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint32_t i;

	/* Index dpm_table->pcie_speed_table.count is reserved for PCIE boot level. */
	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed  =
			(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount =
			(uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity = 1;
		table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5);
		table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30);
	}

	smu_data->smc_state_table.LinkLevelCount =
		(uint8_t)dpm_table->pcie_speed_table.count;
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);

	return 0;
}

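/*
 * Compute the MPLL register set for @memory_clock from the VBIOS memory
 * PLL dividers: feedback/post dividers, GDDR5 YCLK selection, optional
 * memory spread spectrum, and the DLL power-down state requested via
 * @dllStateOn.
 */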
static int ci_calculate_mclk_params(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU7_Discrete_MemoryLevel *mclk,
		bool strobe_mode,
		bool dllStateOn
		)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t  dll_cntl = data->clock_registers.vDLL_CNTL;
	uint32_t  mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
	uint32_t  mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
	uint32_t  mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
	uint32_t  mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
	uint32_t  mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
	uint32_t  mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
	uint32_t  mpll_ss1 = data->clock_registers.vMPLL_SS1;
	uint32_t  mpll_ss2 = data->clock_registers.vMPLL_SS2;

	pp_atomctrl_memory_clock_param mpll_param;
	int result;

	result = atomctrl_get_memory_pll_dividers_si(hwmgr,
				memory_clock, &mpll_param, strobe_mode);
	PP_ASSERT_WITH_CODE(0 == result,
		"Error retrieving Memory Clock Parameters from VBIOS.", return result);

	mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);

	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
							MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
							MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
	mpll_func_cntl_1  = PHM_SET_FIELD(mpll_func_cntl_1,
							MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);

	mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
							MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);

	if (data->is_memory_gddr5) {
		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
								MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
		mpll_dq_func_cntl  = PHM_SET_FIELD(mpll_dq_func_cntl,
								MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
		pp_atomctrl_internal_ss_info ss_info;
		uint32_t freq_nom;
		uint32_t tmp;
		uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);

		/* for GDDR5 for all modes and DDR3 */
		if (1 == mpll_param.qdr)
			freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
		else
			freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);

		/* tmp = (freq_nom / reference_clock * reference_divider) ^ 2  Note: S.I. reference_divider = 1 */
		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;

		if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
			uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
			uint32_t clkv =
				(uint32_t)((((131 * ss_info.speed_spectrum_percentage *
							ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);

			mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
			mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
		}
	}

	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);

	mclk->MclkFrequency   = memory_clock;
	mclk->MpllFuncCntl    = mpll_func_cntl;
	mclk->MpllFuncCntl_1  = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2  = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl  = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl  = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl  = mclk_pwrmgt_cntl;
	mclk->DllCntl         = dll_cntl;
	mclk->MpllSs1         = mpll_ss1;
	mclk->MpllSs2         = mpll_ss2;

	return 0;
}

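/*
 * Map a memory clock (in 10 kHz units) onto the 4-bit MC_SEQ
 * frequency-ratio index used by the memory-controller sequencer, with
 * separate ranges for strobe and non-strobe mode; a DDR3 variant
 * follows below.
 */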
static uint8_t ci_get_mclk_frequency_ratio(uint32_t memory_clock,
		bool strobe_mode)
{
	uint8_t mc_para_index;

	if (strobe_mode) {
		if (memory_clock < 12500)
			mc_para_index = 0x00;
		else if (memory_clock > 47500)
			mc_para_index = 0x0f;
		else
			mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
	} else {
		if (memory_clock < 65000)
			mc_para_index = 0x00;
		else if (memory_clock > 135000)
			mc_para_index = 0x0f;
		else
			mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
	}

	return mc_para_index;
}

static uint8_t ci_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
	uint8_t mc_para_index;

	if (memory_clock < 10000)
		mc_para_index = 0;
	else if (memory_clock >= 80000)
		mc_para_index = 0x0f;
	else
		mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);

	return mc_para_index;
}

static int ci_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
					uint32_t memory_clock, uint32_t *p_shed)
{
	unsigned int i;

	*p_shed = 1;

	for (i = 0; i < pl->count; i++) {
		if (memory_clock < pl->entries[i].Mclk) {
			*p_shed = i;
			break;
		}
	}

	return 0;
}

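/*
 * Fill one SMU7_Discrete_MemoryLevel: minimum VDDC/VDDCI/MVDD from the
 * MCLK dependency tables, strobe and EDC enables based on fixed clock
 * thresholds, the DLL state derived from the MC_SEQ registers, and the
 * MPLL register set from ci_calculate_mclk_params().
 */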
static int ci_populate_single_memory_level(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU7_Discrete_MemoryLevel *memory_level
		)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	int result = 0;
	bool dll_state_on;
	uint32_t mclk_edc_wr_enable_threshold = 40000;
	uint32_t mclk_edc_enable_threshold = 40000;
	uint32_t mclk_strobe_mode_threshold = 40000;

	if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
		result = ci_get_dependency_volt_by_clk(hwmgr,
			hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
	}

	if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
		result = ci_get_dependency_volt_by_clk(hwmgr,
				hwmgr->dyn_state.vddci_dependency_on_mclk,
				memory_clock,
				&memory_level->MinVddci);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
	}

	if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
		result = ci_get_dependency_volt_by_clk(hwmgr,
				hwmgr->dyn_state.mvdd_dependency_on_mclk,
				memory_clock,
				&memory_level->MinMvdd);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinMvdd voltage value from memory MVDD voltage dependency table", return result);
	}

	memory_level->MinVddcPhases = 1;

	if (data->vddc_phase_shed_control) {
		ci_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
				memory_clock, &memory_level->MinVddcPhases);
	}

	memory_level->EnabledForThrottle = 1;
	memory_level->EnabledForActivity = 1;
	memory_level->UpH = data->current_profile_setting.mclk_up_hyst;
	memory_level->DownH = data->current_profile_setting.mclk_down_hyst;
	memory_level->VoltageDownH = 0;

	/* Indicates maximum activity level for this performance level. */
	memory_level->ActivityLevel = data->current_profile_setting.mclk_activity;
	memory_level->StutterEnable = 0;
	memory_level->StrobeEnable = 0;
	memory_level->EdcReadEnable = 0;
	memory_level->EdcWriteEnable = 0;
	memory_level->RttEnable = 0;

	/* default set to low watermark. Highest level will be set to high later. */
	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
	data->display_timing.vrefresh = hwmgr->display_config->vrefresh;

	/* stutter mode is not supported on ci */

	/* decide strobe mode */
	memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
		(memory_clock <= mclk_strobe_mode_threshold);

	/* decide EDC mode and memory clock ratio */
	if (data->is_memory_gddr5) {
		memory_level->StrobeRatio = ci_get_mclk_frequency_ratio(memory_clock,
					memory_level->StrobeEnable);

		if ((mclk_edc_enable_threshold != 0) &&
				(memory_clock > mclk_edc_enable_threshold)) {
			memory_level->EdcReadEnable = 1;
		}

		if ((mclk_edc_wr_enable_threshold != 0) &&
				(memory_clock > mclk_edc_wr_enable_threshold)) {
			memory_level->EdcWriteEnable = 1;
		}

		if (memory_level->StrobeEnable) {
			if (ci_get_mclk_frequency_ratio(memory_clock, 1) >=
					((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
			else
				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
		} else
			dll_state_on = data->dll_default_on;
	} else {
		memory_level->StrobeRatio =
			ci_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
	}

	result = ci_calculate_mclk_params(hwmgr,
		memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);

	if (0 == result) {
		memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
		memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
		memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
		/* MCLK frequency in units of 10KHz */
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
		/* Indicates maximum activity level for this performance level. */
		CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
	}

	return result;
}

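/*
 * Populate every MCLK DPM level and upload the MemoryLevel array to SMC
 * SRAM.  A device-specific quirk for IDs 0x67B0/0x67B1 copies the
 * level-0 VDDCI/MVDD minimums into level 1.
 */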
static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int result;
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t dev_id;

	uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
	uint32_t level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * SMU7_MAX_LEVELS_MEMORY;
	SMU7_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
	uint32_t i;

	memset(levels, 0x00, level_array_size);

	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
			"can not populate memory level as memory clock is zero", return -EINVAL);
		result = ci_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
			&(smu_data->smc_state_table.MemoryLevel[i]));
		if (0 != result)
			return result;
	}

	smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;

	dev_id = adev->pdev->device;

	if ((dpm_table->mclk_table.count >= 2)
		&& ((dev_id == 0x67B0) || (dev_id == 0x67B1))) {
		smu_data->smc_state_table.MemoryLevel[1].MinVddci =
				smu_data->smc_state_table.MemoryLevel[0].MinVddci;
		smu_data->smc_state_table.MemoryLevel[1].MinMvdd =
				smu_data->smc_state_table.MemoryLevel[0].MinMvdd;
	}
	smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);

	smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
	data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
	smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;

	result = ci_copy_bytes_to_smc(hwmgr,
		level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
		SMC_RAM_END);

	return result;
}

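/*
 * Select the MVDD level for the requested memory clock: the first entry
 * in the MVDD-on-MCLK dependency table whose clock is >= @mclk (always
 * rounding the voltage up).  Fails when MVDD is not controllable.
 */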
static int ci_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
					SMU7_Discrete_VoltageLevel *voltage)
{
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	uint32_t i = 0;

	if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
		/* find the first mvdd entry whose clock is at least the requested clock */
		for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
			if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
				/* Always round to higher voltage. */
				voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
				break;
			}
		}

		PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
			"MVDD Voltage is outside the supported range.", return -EINVAL);

	} else {
		return -EINVAL;
	}

	return 0;
}

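/*
 * Build the ACPI (lowest power) SCLK and MCLK levels: minimum voltages,
 * SPLL powered down and held in reset, DLLs force-reset and disabled,
 * and activity/throttling turned off.  DPM on DC is explicitly masked
 * out for this state.
 */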
1377 static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1378 	SMU7_Discrete_DpmTable *table)
1379 {
1380 	int result = 0;
1381 	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1382 	struct pp_atomctrl_clock_dividers_vi dividers;
1383 
1384 	SMU7_Discrete_VoltageLevel voltage_level;
1385 	uint32_t spll_func_cntl    = data->clock_registers.vCG_SPLL_FUNC_CNTL;
1386 	uint32_t spll_func_cntl_2  = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
1387 	uint32_t dll_cntl          = data->clock_registers.vDLL_CNTL;
1388 	uint32_t mclk_pwrmgt_cntl  = data->clock_registers.vMCLK_PWRMGT_CNTL;
1389 
1390 
1391 	/* The ACPI state should not do DPM on DC (or ever).*/
1392 	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
1393 
1394 	if (data->acpi_vddc)
1395 		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
1396 	else
1397 		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
1398 
1399 	table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 0 : 1;
1400 	/* assign zero for now*/
1401 	table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
1402 
1403 	/* get the engine clock dividers for this clock value*/
1404 	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
1405 		table->ACPILevel.SclkFrequency,  &dividers);
1406 
1407 	PP_ASSERT_WITH_CODE(result == 0,
1408 		"Error retrieving Engine Clock dividers from VBIOS.", return result);
1409 
1410 	/* divider ID for required SCLK*/
1411 	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
1412 	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1413 	table->ACPILevel.DeepSleepDivId = 0;
1414 
1415 	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
1416 							CG_SPLL_FUNC_CNTL,   SPLL_PWRON,     0);
1417 	spll_func_cntl      = PHM_SET_FIELD(spll_func_cntl,
1418 							CG_SPLL_FUNC_CNTL,   SPLL_RESET,     1);
1419 	spll_func_cntl_2    = PHM_SET_FIELD(spll_func_cntl_2,
1420 							CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL,   4);
1421 
1422 	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
1423 	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
1424 	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
1425 	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
1426 	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
1427 	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
1428 	table->ACPILevel.CcPwrDynRm = 0;
1429 	table->ACPILevel.CcPwrDynRm1 = 0;
1430 
1431 	/* For various features to be enabled/disabled while this level is active.*/
1432 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
1433 	/* SCLK frequency in units of 10KHz*/
1434 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
1435 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
1436 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
1437 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
1438 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
1439 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
1440 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
1441 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
1442 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
1443 
1444 
1445 	/* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
1446 	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
1447 	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
1448 
1449 	if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
1450 		table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
1451 	else {
1452 		if (data->acpi_vddci != 0)
1453 			table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
1454 		else
1455 			table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
1456 	}
1457 
1458 	if (0 == ci_populate_mvdd_value(hwmgr, 0, &voltage_level))
1459 		table->MemoryACPILevel.MinMvdd =
1460 			PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
1461 	else
1462 		table->MemoryACPILevel.MinMvdd = 0;
1463 
1464 	/* Force reset on DLL*/
1465 	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1466 		MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
1467 	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1468 		MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
1469 
1470 	/* Disable DLL in ACPIState*/
1471 	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1472 		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
1473 	mclk_pwrmgt_cntl    = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1474 		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
1475 
1476 	/* Enable DLL bypass signal*/
1477 	dll_cntl            = PHM_SET_FIELD(dll_cntl,
1478 		DLL_CNTL, MRDCK0_BYPASS, 0);
1479 	dll_cntl            = PHM_SET_FIELD(dll_cntl,
1480 		DLL_CNTL, MRDCK1_BYPASS, 0);
1481 
1482 	table->MemoryACPILevel.DllCntl            =
1483 		PP_HOST_TO_SMC_UL(dll_cntl);
1484 	table->MemoryACPILevel.MclkPwrmgtCntl     =
1485 		PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
1486 	table->MemoryACPILevel.MpllAdFuncCntl     =
1487 		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
1488 	table->MemoryACPILevel.MpllDqFuncCntl     =
1489 		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
1490 	table->MemoryACPILevel.MpllFuncCntl       =
1491 		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
1492 	table->MemoryACPILevel.MpllFuncCntl_1     =
1493 		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
1494 	table->MemoryACPILevel.MpllFuncCntl_2     =
1495 		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
1496 	table->MemoryACPILevel.MpllSs1            =
1497 		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
1498 	table->MemoryACPILevel.MpllSs2            =
1499 		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
1500 
1501 	table->MemoryACPILevel.EnabledForThrottle = 0;
1502 	table->MemoryACPILevel.EnabledForActivity = 0;
1503 	table->MemoryACPILevel.UpH = 0;
1504 	table->MemoryACPILevel.DownH = 100;
1505 	table->MemoryACPILevel.VoltageDownH = 0;
1506 	/* Indicates maximum activity level for this performance level.*/
1507 	table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
1508 
1509 	table->MemoryACPILevel.StutterEnable = 0;
1510 	table->MemoryACPILevel.StrobeEnable = 0;
1511 	table->MemoryACPILevel.EdcReadEnable = 0;
1512 	table->MemoryACPILevel.EdcWriteEnable = 0;
1513 	table->MemoryACPILevel.RttEnable = 0;
1514 
1515 	return result;
1516 }
1517 
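/*
 * Copy the UVD clock/voltage dependency table from the powerplay state
 * into the SMC DPM table, resolving a PLL post-divider for each
 * VCLK/DCLK pair via the VBIOS dividers.
 */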
1518 static int ci_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1519 					SMU7_Discrete_DpmTable *table)
1520 {
1521 	int result = 0;
1522 	uint8_t count;
1523 	struct pp_atomctrl_clock_dividers_vi dividers;
1524 	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
1525 		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1526 
1527 	table->UvdLevelCount = (uint8_t)(uvd_table->count);
1528 
1529 	for (count = 0; count < table->UvdLevelCount; count++) {
1530 		table->UvdLevel[count].VclkFrequency =
1531 					uvd_table->entries[count].vclk;
1532 		table->UvdLevel[count].DclkFrequency =
1533 					uvd_table->entries[count].dclk;
1534 		table->UvdLevel[count].MinVddc =
1535 					uvd_table->entries[count].v * VOLTAGE_SCALE;
1536 		table->UvdLevel[count].MinVddcPhases = 1;
1537 
1538 		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1539 				table->UvdLevel[count].VclkFrequency, &dividers);
1540 		PP_ASSERT_WITH_CODE((0 == result),
1541 				"cannot find divider ID for Vclk", return result);
1542 
1543 		table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1544 
1545 		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1546 				table->UvdLevel[count].DclkFrequency, &dividers);
1547 		PP_ASSERT_WITH_CODE((0 == result),
1548 				"cannot find divider ID for Dclk", return result);
1549 
1550 		table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
1551 		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1552 		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1553 		CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc);
1554 	}
1555 
1556 	return result;
1557 }
1558 
1559 static int ci_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1560 		SMU7_Discrete_DpmTable *table)
1561 {
1562 	int result = -EINVAL;
1563 	uint8_t count;
1564 	struct pp_atomctrl_clock_dividers_vi dividers;
1565 	struct phm_vce_clock_voltage_dependency_table *vce_table =
1566 				hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1567 
1568 	table->VceLevelCount = (uint8_t)(vce_table->count);
1569 	table->VceBootLevel = 0;
1570 
1571 	for (count = 0; count < table->VceLevelCount; count++) {
1572 		table->VceLevel[count].Frequency = vce_table->entries[count].evclk;
1573 		table->VceLevel[count].MinVoltage =
1574 				vce_table->entries[count].v * VOLTAGE_SCALE;
1575 		table->VceLevel[count].MinPhases = 1;
1576 
1577 		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1578 				table->VceLevel[count].Frequency, &dividers);
1579 		PP_ASSERT_WITH_CODE((0 == result),
1580 				"cannot find divider ID for VCE engine clock",
1581 				return result);
1582 
1583 		table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1584 
1585 		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1586 		CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage);
1587 	}
1588 	return result;
1589 }
1590 
1591 static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
1592 					SMU7_Discrete_DpmTable *table)
1593 {
1594 	int result = -EINVAL;
1595 	uint8_t count;
1596 	struct pp_atomctrl_clock_dividers_vi dividers;
1597 	struct phm_acp_clock_voltage_dependency_table *acp_table =
1598 				hwmgr->dyn_state.acp_clock_voltage_dependency_table;
1599 
1600 	table->AcpLevelCount = (uint8_t)(acp_table->count);
1601 	table->AcpBootLevel = 0;
1602 
1603 	for (count = 0; count < table->AcpLevelCount; count++) {
1604 		table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk;
1605 		table->AcpLevel[count].MinVoltage = acp_table->entries[count].v;
1606 		table->AcpLevel[count].MinPhases = 1;
1607 
1608 		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1609 				table->AcpLevel[count].Frequency, &dividers);
1610 		PP_ASSERT_WITH_CODE((0 == result),
1611 				"cannot find divider ID for engine clock", return result);
1612 
1613 		table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1614 
1615 		CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
1616 		CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage);
1617 	}
1618 	return result;
1619 }
1620 
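/*
 * Have the VBIOS program DRAM timings for the given engine/memory clock
 * pair, then read the resulting MC arbiter registers back into the SMC's
 * per-level timing entry.
 */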
1621 static int ci_populate_memory_timing_parameters(
1622 		struct pp_hwmgr *hwmgr,
1623 		uint32_t engine_clock,
1624 		uint32_t memory_clock,
1625 		struct SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs
1626 		)
1627 {
1628 	uint32_t dramTiming;
1629 	uint32_t dramTiming2;
1630 	uint32_t burstTime;
1631 	int result;
1632 
1633 	result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1634 				engine_clock, memory_clock);
1635 
1636 	PP_ASSERT_WITH_CODE(result == 0,
1637 		"Error calling VBIOS to set DRAM_TIMING.", return result);
1638 
1639 	dramTiming  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1640 	dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1641 	burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1642 
1643 	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dramTiming);
1644 	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
1645 	arb_regs->McArbBurstTime = (uint8_t)burstTime;
1646 
1647 	return 0;
1648 }
1649 
1650 static int ci_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1651 {
1652 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1653 	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1654 	int result = 0;
1655 	SMU7_Discrete_MCArbDramTimingTable  arb_regs;
1656 	uint32_t i, j;
1657 
1658 	memset(&arb_regs, 0x00, sizeof(SMU7_Discrete_MCArbDramTimingTable));
1659 
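	/* Build one arbiter timing entry for every SCLK/MCLK DPM level pair. */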
1660 	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1661 		for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1662 			result = ci_populate_memory_timing_parameters
1663 				(hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
1664 				 data->dpm_table.mclk_table.dpm_levels[j].value,
1665 				 &arb_regs.entries[i][j]);
1666 
1667 			if (0 != result)
1668 				break;
1669 		}
1670 	}
1671 
1672 	if (0 == result) {
1673 		result = ci_copy_bytes_to_smc(
1674 				hwmgr,
1675 				smu_data->arb_table_start,
1676 				(uint8_t *)&arb_regs,
1677 				sizeof(SMU7_Discrete_MCArbDramTimingTable),
1678 				SMC_RAM_END
1679 				);
1680 	}
1681 
1682 	return result;
1683 }
1684 
1685 static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1686 			SMU7_Discrete_DpmTable *table)
1687 {
1688 	int result = 0;
1689 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1690 	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1691 
1692 	table->GraphicsBootLevel = 0;
1693 	table->MemoryBootLevel = 0;
1694 
1695 	/* find boot level from dpm table*/
1696 	result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1697 			data->vbios_boot_state.sclk_bootup_value,
1698 			(uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
1699 
1700 	if (0 != result) {
1701 		smu_data->smc_state_table.GraphicsBootLevel = 0;
1702 		pr_err("VBIOS boot engine clock not found in the dependency table. Using Graphics DPM level 0!\n");
1703 		result = 0;
1704 	}
1705 
1706 	result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1707 		data->vbios_boot_state.mclk_bootup_value,
1708 		(uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
1709 
1710 	if (0 != result) {
1711 		smu_data->smc_state_table.MemoryBootLevel = 0;
1712 		pr_err("VBIOS boot memory clock not found in the dependency table. Using Memory DPM level 0!\n");
1713 		result = 0;
1714 	}
1715 
1716 	table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
1717 	table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
1718 	table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
1719 
1720 	return result;
1721 }
1722 
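/*
 * Forward only the MC register addresses whose values actually vary
 * across entries (validflag bit set), compacting them into the
 * fixed-size SMC address array.
 */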
1723 static int ci_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
1724 				 SMU7_Discrete_MCRegisters *mc_reg_table)
1725 {
1726 	const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smu_backend;
1727 
1728 	uint32_t i, j;
1729 
1730 	for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
1731 		if (smu_data->mc_reg_table.validflag & (1 << j)) {
1732 			PP_ASSERT_WITH_CODE(i < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE,
1733 				"Index of mc_reg_table->address[] array out of bounds", return -EINVAL);
1734 			mc_reg_table->address[i].s0 =
1735 				PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
1736 			mc_reg_table->address[i].s1 =
1737 				PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
1738 			i++;
1739 		}
1740 	}
1741 
1742 	mc_reg_table->last = (uint8_t)i;
1743 
1744 	return 0;
1745 }
1746 
1747 static void ci_convert_mc_registers(
1748 	const struct ci_mc_reg_entry *entry,
1749 	SMU7_Discrete_MCRegisterSet *data,
1750 	uint32_t num_entries, uint32_t valid_flag)
1751 {
1752 	uint32_t i, j;
1753 
1754 	for (i = 0, j = 0; j < num_entries; j++) {
1755 		if (valid_flag & (1 << j)) {
1756 			data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
1757 			i++;
1758 		}
1759 	}
1760 }
1761 
1762 static int ci_convert_mc_reg_table_entry_to_smc(
1763 		struct pp_hwmgr *hwmgr,
1764 		const uint32_t memory_clock,
1765 		SMU7_Discrete_MCRegisterSet *mc_reg_table_data
1766 		)
1767 {
1768 	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1769 	uint32_t i = 0;
1770 
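	/* Select the first entry whose mclk_max covers the requested memory clock, falling back to the highest entry. */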
1771 	for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
1772 		if (memory_clock <=
1773 			smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
1774 			break;
1775 		}
1776 	}
1777 
1778 	if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
1779 		--i;
1780 
1781 	ci_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
1782 				mc_reg_table_data, smu_data->mc_reg_table.last,
1783 				smu_data->mc_reg_table.validflag);
1784 
1785 	return 0;
1786 }
1787 
1788 static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
1789 		SMU7_Discrete_MCRegisters *mc_regs)
1790 {
1791 	int result = 0;
1792 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1793 	int res;
1794 	uint32_t i;
1795 
1796 	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
1797 		res = ci_convert_mc_reg_table_entry_to_smc(
1798 				hwmgr,
1799 				data->dpm_table.mclk_table.dpm_levels[i].value,
1800 				&mc_regs->data[i]
1801 				);
1802 
1803 		if (0 != res)
1804 			result = res;
1805 	}
1806 
1807 	return result;
1808 }
1809 
1810 static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
1811 {
1812 	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1813 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1814 	uint32_t address;
1815 	int32_t result;
1816 
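	/* Re-upload only when an overdrive MCLK change has invalidated the table. */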
1817 	if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
1818 		return 0;
1819 
1820 
1821 	memset(&smu_data->mc_regs, 0, sizeof(SMU7_Discrete_MCRegisters));
1822 
1823 	result = ci_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
1824 
1825 	if (result != 0)
1826 		return result;
1827 
1828 	address = smu_data->mc_reg_table_start + (uint32_t)offsetof(SMU7_Discrete_MCRegisters, data[0]);
1829 
1830 	return  ci_copy_bytes_to_smc(hwmgr, address,
1831 				 (uint8_t *)&smu_data->mc_regs.data[0],
1832 				sizeof(SMU7_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
1833 				SMC_RAM_END);
1834 }
1835 
1836 static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
1837 {
1838 	int result;
1839 	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1840 
1841 	memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters));
1842 	result = ci_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
1843 	PP_ASSERT_WITH_CODE(0 == result,
1844 		"Failed to initialize MCRegTable for the MC register addresses!", return result;);
1845 
1846 	result = ci_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
1847 	PP_ASSERT_WITH_CODE(0 == result,
1848 		"Failed to initialize MCRegTable for driver state!", return result;);
1849 
1850 	return ci_copy_bytes_to_smc(hwmgr, smu_data->mc_reg_table_start,
1851 			(uint8_t *)&smu_data->mc_regs, sizeof(SMU7_Discrete_MCRegisters), SMC_RAM_END);
1852 }
1853 
1854 static int ci_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
1855 {
1856 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1857 	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1858 	uint8_t count, level;
1859 
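	/* Take the boot level from the first dependency entry that meets or exceeds the VBIOS bootup clock. */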
1860 	count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
1861 
1862 	for (level = 0; level < count; level++) {
1863 		if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
1864 			 >= data->vbios_boot_state.sclk_bootup_value) {
1865 			smu_data->smc_state_table.GraphicsBootLevel = level;
1866 			break;
1867 		}
1868 	}
1869 
1870 	count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
1871 
1872 	for (level = 0; level < count; level++) {
1873 		if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
1874 			>= data->vbios_boot_state.mclk_bootup_value) {
1875 			smu_data->smc_state_table.MemoryBootLevel = level;
1876 			break;
1877 		}
1878 	}
1879 
1880 	return 0;
1881 }
1882 
1883 static int ci_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
1884 					    SMU7_Discrete_DpmTable *table)
1885 {
1886 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1887 
1888 	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
1889 		table->SVI2Enable = 1;
1890 	else
1891 		table->SVI2Enable = 0;
1892 	return 0;
1893 }
1894 
1895 static int ci_start_smc(struct pp_hwmgr *hwmgr)
1896 {
1897 	/* set smc instruct start point at 0x0 */
1898 	/* set SMC instruction start point at 0x0 */
1899 
1900 	/* enable smc clock */
1901 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
1902 
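	/* Release the SMC from reset and wait for its firmware to report that interrupts are enabled. */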
1903 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
1904 
1905 	PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
1906 				 INTERRUPTS_ENABLED, 1);
1907 
1908 	return 0;
1909 }
1910 
1911 static int ci_populate_vr_config(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
1912 {
1913 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1914 	uint16_t config;
1915 
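	/* VRConfig packs one regulator plane / SMIO pattern selection per rail (VDDGFX, VDDC, VDDCI, MVDD) into a single word. */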
1916 	config = VR_SVI2_PLANE_1;
1917 	table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
1918 
1919 	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1920 		config = VR_SVI2_PLANE_2;
1921 		table->VRConfig |= config;
1922 	} else {
1923 		pr_info("VDDC should be on SVI2 controller!\n");
1924 	}
1925 
1926 	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1927 		config = VR_SVI2_PLANE_2;
1928 		table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1929 	} else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1930 		config = VR_SMIO_PATTERN_1;
1931 		table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1932 	}
1933 
1934 	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1935 		config = VR_SMIO_PATTERN_2;
1936 		table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
1937 	}
1938 
1939 	return 0;
1940 }
1941 
1942 static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
1943 {
1944 	int result;
1945 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1946 	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1947 	SMU7_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
1948 	struct pp_atomctrl_gpio_pin_assignment gpio_pin;
1949 	u32 i;
1950 
1951 	ci_initialize_power_tune_defaults(hwmgr);
1952 	memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
1953 
1954 	if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
1955 		ci_populate_smc_voltage_tables(hwmgr, table);
1956 
1957 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1958 			PHM_PlatformCaps_AutomaticDCTransition))
1959 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1960 
1961 
1962 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1963 			PHM_PlatformCaps_StepVddc))
1964 		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1965 
1966 	if (data->is_memory_gddr5)
1967 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1968 
1969 	if (data->ulv_supported) {
1970 		result = ci_populate_ulv_state(hwmgr, &(table->Ulv));
1971 		PP_ASSERT_WITH_CODE(0 == result,
1972 			"Failed to initialize ULV state!", return result);
1973 
1974 		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1975 			ixCG_ULV_PARAMETER, 0x40035);
1976 	}
1977 
1978 	result = ci_populate_all_graphic_levels(hwmgr);
1979 	PP_ASSERT_WITH_CODE(0 == result,
1980 		"Failed to initialize Graphics Level!", return result);
1981 
1982 	result = ci_populate_all_memory_levels(hwmgr);
1983 	PP_ASSERT_WITH_CODE(0 == result,
1984 		"Failed to initialize Memory Level!", return result);
1985 
1986 	result = ci_populate_smc_link_level(hwmgr, table);
1987 	PP_ASSERT_WITH_CODE(0 == result,
1988 		"Failed to initialize Link Level!", return result);
1989 
1990 	result = ci_populate_smc_acpi_level(hwmgr, table);
1991 	PP_ASSERT_WITH_CODE(0 == result,
1992 		"Failed to initialize ACPI Level!", return result);
1993 
1994 	result = ci_populate_smc_vce_level(hwmgr, table);
1995 	PP_ASSERT_WITH_CODE(0 == result,
1996 		"Failed to initialize VCE Level!", return result);
1997 
1998 	result = ci_populate_smc_acp_level(hwmgr, table);
1999 	PP_ASSERT_WITH_CODE(0 == result,
2000 		"Failed to initialize ACP Level!", return result);
2001 
2002 	/* Since only the initial state is completely set up at this point (the other states */
2003 	/* are just copies of the boot state), we only need to populate the ARB settings for the initial state. */
2004 	result = ci_program_memory_timing_parameters(hwmgr);
2005 	PP_ASSERT_WITH_CODE(0 == result,
2006 		"Failed to Write ARB settings for the initial state.", return result);
2007 
2008 	result = ci_populate_smc_uvd_level(hwmgr, table);
2009 	PP_ASSERT_WITH_CODE(0 == result,
2010 		"Failed to initialize UVD Level!", return result);
2011 
2012 	table->UvdBootLevel  = 0;
2013 	table->VceBootLevel  = 0;
2014 	table->AcpBootLevel  = 0;
2015 	table->SamuBootLevel  = 0;
2016 
2017 	table->GraphicsBootLevel = 0;
2018 	table->MemoryBootLevel = 0;
2019 
2020 	result = ci_populate_smc_boot_level(hwmgr, table);
2021 	PP_ASSERT_WITH_CODE(0 == result,
2022 		"Failed to initialize Boot Level!", return result);
2023 
2024 	result = ci_populate_smc_initial_state(hwmgr);
2025 	PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
2026 
2027 	result = ci_populate_bapm_parameters_in_dpm_table(hwmgr);
2028 	PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
2029 
2030 	table->UVDInterval = 1;
2031 	table->VCEInterval = 1;
2032 	table->ACPInterval = 1;
2033 	table->SAMUInterval = 1;
2034 	table->GraphicsVoltageChangeEnable  = 1;
2035 	table->GraphicsThermThrottleEnable  = 1;
2036 	table->GraphicsInterval = 1;
2037 	table->VoltageInterval  = 1;
2038 	table->ThermalInterval  = 1;
2039 
2040 	table->TemperatureLimitHigh =
2041 		(data->thermal_temp_setting.temperature_high *
2042 		 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2043 	table->TemperatureLimitLow =
2044 		(data->thermal_temp_setting.temperature_low *
2045 		SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2046 
2047 	table->MemoryVoltageChangeEnable  = 1;
2048 	table->MemoryInterval  = 1;
2049 	table->VoltageResponseTime  = 0;
2050 	table->VddcVddciDelta = 4000;
2051 	table->PhaseResponseTime  = 0;
2052 	table->MemoryThermThrottleEnable  = 1;
2053 
2054 	PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
2055 			"There must be 1 or more PCIe levels defined in the PPTable.",
2056 			return -EINVAL);
2057 
2058 	table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count;
2059 	table->PCIeGenInterval = 1;
2060 
2061 	result = ci_populate_vr_config(hwmgr, table);
2062 	PP_ASSERT_WITH_CODE(0 == result,
2063 			"Failed to populate VRConfig setting!", return result);
2064 	data->vr_config = table->VRConfig;
2065 
2066 	ci_populate_smc_svi2_config(hwmgr, table);
2067 
2068 	for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++)
2069 		CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]);
2070 
2071 	table->ThermGpio  = 17;
2072 	table->SclkStepSize = 0x4000;
2073 	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
2074 		table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
2075 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2076 				PHM_PlatformCaps_RegulatorHot);
2077 	} else {
2078 		table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
2079 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2080 				PHM_PlatformCaps_RegulatorHot);
2081 	}
2082 
2083 	table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
2084 
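	/* Byte-swap the remaining host-endian fields before the table is copied into SMC RAM. */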
2085 	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
2086 	CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
2087 	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
2088 	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
2089 	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
2090 	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
2091 	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
2092 	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
2093 	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
2094 	table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta);
2095 	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
2096 	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
2097 
2098 	table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
2099 	table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
2100 	table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
2101 
2102 	/* Upload all DPM data (levels, level counts, etc.) to SMC memory. */
2103 	result = ci_copy_bytes_to_smc(hwmgr, smu_data->dpm_table_start +
2104 					offsetof(SMU7_Discrete_DpmTable, SystemFlags),
2105 					(uint8_t *)&(table->SystemFlags),
2106 					sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
2107 					SMC_RAM_END);
2108 
2109 	PP_ASSERT_WITH_CODE(0 == result,
2110 		"Failed to upload dpm data to SMC memory!", return result;);
2111 
2112 	result = ci_populate_initial_mc_reg_table(hwmgr);
2113 	PP_ASSERT_WITH_CODE((0 == result),
2114 		"Failed to populate initial MC Reg table!", return result);
2115 
2116 	result = ci_populate_pm_fuses(hwmgr);
2117 	PP_ASSERT_WITH_CODE(0 == result,
2118 			"Failed to populate PM fuses to SMC memory!", return result);
2119 
2120 	ci_start_smc(hwmgr);
2121 
2122 	return 0;
2123 }
2124 
2125 static int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
2126 {
2127 	struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2128 	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
2129 	uint32_t duty100;
2130 	uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
2131 	uint16_t fdo_min, slope1, slope2;
2132 	uint32_t reference_clock;
2133 	int res;
2134 	uint64_t tmp64;
2135 
2136 	if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
2137 		return 0;
2138 
2139 	if (hwmgr->thermal_controller.fanInfo.bNoFan) {
2140 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2141 			PHM_PlatformCaps_MicrocodeFanControl);
2142 		return 0;
2143 	}
2144 
2145 	if (0 == ci_data->fan_table_start) {
2146 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2147 		return 0;
2148 	}
2149 
2150 	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
2151 
2152 	if (0 == duty100) {
2153 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2154 		return 0;
2155 	}
2156 
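	/* usPWMMin is in 0.01% units; scale it to the controller's 0..duty100 range. */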
2157 	tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
2158 	do_div(tmp64, 10000);
2159 	fdo_min = (uint16_t)tmp64;
2160 
2161 	t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
2162 	t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
2163 
2164 	pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
2165 	pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
2166 
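	/*
	 * Temperatures are supplied in 0.01 degC units; the +50 terms round
	 * the integer divisions by 100 to nearest, and the factor of 16
	 * scales the slope into the fan table's fixed-point format.
	 */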
2167 	slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
2168 	slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
2169 
2170 	fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
2171 	fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
2172 	fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
2173 
2174 	fan_table.Slope1 = cpu_to_be16(slope1);
2175 	fan_table.Slope2 = cpu_to_be16(slope2);
2176 
2177 	fan_table.FdoMin = cpu_to_be16(fdo_min);
2178 
2179 	fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
2180 
2181 	fan_table.HystUp = cpu_to_be16(1);
2182 
2183 	fan_table.HystSlope = cpu_to_be16(1);
2184 
2185 	fan_table.TempRespLim = cpu_to_be16(5);
2186 
2187 	reference_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
2188 
2189 	fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
2190 
2191 	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
2192 
2193 	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
2194 
2195 	res = ci_copy_bytes_to_smc(hwmgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
2196 
2197 	return res;
2198 }
2199 
2200 static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2201 {
2202 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2203 
2204 	if (data->need_update_smu7_dpm_table &
2205 			(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
2206 		return ci_program_memory_timing_parameters(hwmgr);
2207 
2208 	return 0;
2209 }
2210 
2211 static int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2212 {
2213 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2214 	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2215 
2216 	int result = 0;
2217 	uint32_t low_sclk_interrupt_threshold = 0;
2218 
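	/* If low-SCLK throttle notification is enabled, mirror the host threshold into the SMC DPM table. */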
2219 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2220 			PHM_PlatformCaps_SclkThrottleLowNotification)
2221 		&& (data->low_sclk_interrupt_threshold != 0)) {
2222 		low_sclk_interrupt_threshold =
2223 				data->low_sclk_interrupt_threshold;
2224 
2225 		CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2226 
2227 		result = ci_copy_bytes_to_smc(
2228 				hwmgr,
2229 				smu_data->dpm_table_start +
2230 				offsetof(SMU7_Discrete_DpmTable,
2231 					LowSclkInterruptT),
2232 				(uint8_t *)&low_sclk_interrupt_threshold,
2233 				sizeof(uint32_t),
2234 				SMC_RAM_END);
2235 	}
2236 
2237 	result = ci_update_and_upload_mc_reg_table(hwmgr);
2238 
2239 	PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
2240 
2241 	result = ci_program_mem_timing_parameters(hwmgr);
2242 	PP_ASSERT_WITH_CODE((result == 0),
2243 			"Failed to program memory timing parameters!",
2244 			);
2245 
2246 	return result;
2247 }
2248 
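/* Translate generic SMU member identifiers into byte offsets within the CI-specific SMC structures. */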
2249 static uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
2250 {
2251 	switch (type) {
2252 	case SMU_SoftRegisters:
2253 		switch (member) {
2254 		case HandshakeDisables:
2255 			return offsetof(SMU7_SoftRegisters, HandshakeDisables);
2256 		case VoltageChangeTimeout:
2257 			return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout);
2258 		case AverageGraphicsActivity:
2259 			return offsetof(SMU7_SoftRegisters, AverageGraphicsA);
2260 		case AverageMemoryActivity:
2261 			return offsetof(SMU7_SoftRegisters, AverageMemoryA);
2262 		case PreVBlankGap:
2263 			return offsetof(SMU7_SoftRegisters, PreVBlankGap);
2264 		case VBlankTimeout:
2265 			return offsetof(SMU7_SoftRegisters, VBlankTimeout);
2266 		case DRAM_LOG_ADDR_H:
2267 			return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_H);
2268 		case DRAM_LOG_ADDR_L:
2269 			return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_L);
2270 		case DRAM_LOG_PHY_ADDR_H:
2271 			return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
2272 		case DRAM_LOG_PHY_ADDR_L:
2273 			return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
2274 		case DRAM_LOG_BUFF_SIZE:
2275 			return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE);
2276 		}
2277 		break;
2278 	case SMU_Discrete_DpmTable:
2279 		switch (member) {
2280 		case LowSclkInterruptThreshold:
2281 			return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT);
2282 		}
2283 		break;
2284 	}
2285 	pr_debug("can't get the offset of type %x member %x\n", type, member);
2286 	return 0;
2287 }
2288 
2289 static uint32_t ci_get_mac_definition(uint32_t value)
2290 {
2291 	switch (value) {
2292 	case SMU_MAX_LEVELS_GRAPHICS:
2293 		return SMU7_MAX_LEVELS_GRAPHICS;
2294 	case SMU_MAX_LEVELS_MEMORY:
2295 		return SMU7_MAX_LEVELS_MEMORY;
2296 	case SMU_MAX_LEVELS_LINK:
2297 		return SMU7_MAX_LEVELS_LINK;
2298 	case SMU_MAX_ENTRIES_SMIO:
2299 		return SMU7_MAX_ENTRIES_SMIO;
2300 	case SMU_MAX_LEVELS_VDDC:
2301 		return SMU7_MAX_LEVELS_VDDC;
2302 	case SMU_MAX_LEVELS_VDDCI:
2303 		return SMU7_MAX_LEVELS_VDDCI;
2304 	case SMU_MAX_LEVELS_MVDD:
2305 		return SMU7_MAX_LEVELS_MVDD;
2306 	}
2307 
2308 	pr_debug("can't get the mac definition of %x\n", value);
2309 	return 0;
2310 }
2311 
2312 static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr)
2313 {
2314 	uint32_t byte_count, start_addr;
2315 	uint8_t *src;
2316 	uint32_t data;
2317 
2318 	struct cgs_firmware_info info = {0};
2319 
2320 	cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
2321 
2322 	hwmgr->is_kicker = info.is_kicker;
2323 	hwmgr->smu_version = info.version;
2324 	byte_count = info.image_size;
2325 	src = (uint8_t *)info.kptr;
2326 	start_addr = info.ucode_start_address;
2327 
2328 	if  (byte_count > SMC_RAM_END) {
2329 		pr_err("SMC firmware image is larger than the SMC RAM area.\n");
2330 		return -EINVAL;
2331 	}
2332 
2333 	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr);
2334 	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
2335 
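	/* Stream the image one big-endian word at a time through the auto-incrementing indirect data port. */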
2336 	for (; byte_count >= 4; byte_count -= 4) {
2337 		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
2338 		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
2339 		src += 4;
2340 	}
2341 	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
2342 
2343 	if (0 != byte_count) {
2344 		pr_err("SMC firmware size must be divisible by 4\n");
2345 		return -EINVAL;
2346 	}
2347 
2348 	return 0;
2349 }
2350 
2351 static int ci_upload_firmware(struct pp_hwmgr *hwmgr)
2352 {
2353 	if (ci_is_smc_ram_running(hwmgr)) {
2354 		pr_info("smc is running, no need to load smc firmware\n");
2355 		return 0;
2356 	}
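	/* Wait for the boot sequence to finish, then gate the SMC clock and hold the SMC in reset while the new image is loaded. */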
2357 	PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
2358 			boot_seq_done, 1);
2359 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_MISC_CNTL,
2360 			pre_fetcher_en, 1);
2361 
2362 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
2363 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
2364 	return ci_load_smc_ucode(hwmgr);
2365 }
2366 
2367 static int ci_process_firmware_header(struct pp_hwmgr *hwmgr)
2368 {
2369 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2370 	struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2371 
2372 	uint32_t tmp = 0;
2373 	int result;
2374 	bool error = false;
2375 
2376 	if (ci_upload_firmware(hwmgr))
2377 		return -EINVAL;
2378 
2379 	result = ci_read_smc_sram_dword(hwmgr,
2380 				SMU7_FIRMWARE_HEADER_LOCATION +
2381 				offsetof(SMU7_Firmware_Header, DpmTable),
2382 				&tmp, SMC_RAM_END);
2383 
2384 	if (0 == result)
2385 		ci_data->dpm_table_start = tmp;
2386 
2387 	error |= (0 != result);
2388 
2389 	result = ci_read_smc_sram_dword(hwmgr,
2390 				SMU7_FIRMWARE_HEADER_LOCATION +
2391 				offsetof(SMU7_Firmware_Header, SoftRegisters),
2392 				&tmp, SMC_RAM_END);
2393 
2394 	if (0 == result) {
2395 		data->soft_regs_start = tmp;
2396 		ci_data->soft_regs_start = tmp;
2397 	}
2398 
2399 	error |= (0 != result);
2400 
2401 	result = ci_read_smc_sram_dword(hwmgr,
2402 				SMU7_FIRMWARE_HEADER_LOCATION +
2403 				offsetof(SMU7_Firmware_Header, mcRegisterTable),
2404 				&tmp, SMC_RAM_END);
2405 
2406 	if (0 == result)
2407 		ci_data->mc_reg_table_start = tmp;
2408 
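	/* Unlike the other header reads, a failed mcRegisterTable lookup is not folded into 'error' here. */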
2409 	result = ci_read_smc_sram_dword(hwmgr,
2410 				SMU7_FIRMWARE_HEADER_LOCATION +
2411 				offsetof(SMU7_Firmware_Header, FanTable),
2412 				&tmp, SMC_RAM_END);
2413 
2414 	if (0 == result)
2415 		ci_data->fan_table_start = tmp;
2416 
2417 	error |= (0 != result);
2418 
2419 	result = ci_read_smc_sram_dword(hwmgr,
2420 				SMU7_FIRMWARE_HEADER_LOCATION +
2421 				offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
2422 				&tmp, SMC_RAM_END);
2423 
2424 	if (0 == result)
2425 		ci_data->arb_table_start = tmp;
2426 
2427 	error |= (0 != result);
2428 
2429 	result = ci_read_smc_sram_dword(hwmgr,
2430 				SMU7_FIRMWARE_HEADER_LOCATION +
2431 				offsetof(SMU7_Firmware_Header, Version),
2432 				&tmp, SMC_RAM_END);
2433 
2434 	if (0 == result)
2435 		hwmgr->microcode_version_info.SMC = tmp;
2436 
2437 	error |= (0 != result);
2438 
2439 	return error ? 1 : 0;
2440 }
2441 
2442 static uint8_t ci_get_memory_module_index(struct pp_hwmgr *hwmgr)
2443 {
2444 	return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
2445 }
2446 
2447 static bool ci_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
2448 {
2449 	bool result = true;
2450 
2451 	switch (in_reg) {
2452 	case  mmMC_SEQ_RAS_TIMING:
2453 		*out_reg = mmMC_SEQ_RAS_TIMING_LP;
2454 		break;
2455 
2456 	case  mmMC_SEQ_DLL_STBY:
2457 		*out_reg = mmMC_SEQ_DLL_STBY_LP;
2458 		break;
2459 
2460 	case  mmMC_SEQ_G5PDX_CMD0:
2461 		*out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
2462 		break;
2463 
2464 	case  mmMC_SEQ_G5PDX_CMD1:
2465 		*out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
2466 		break;
2467 
2468 	case  mmMC_SEQ_G5PDX_CTRL:
2469 		*out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
2470 		break;
2471 
2472 	case mmMC_SEQ_CAS_TIMING:
2473 		*out_reg = mmMC_SEQ_CAS_TIMING_LP;
2474 		break;
2475 
2476 	case mmMC_SEQ_MISC_TIMING:
2477 		*out_reg = mmMC_SEQ_MISC_TIMING_LP;
2478 		break;
2479 
2480 	case mmMC_SEQ_MISC_TIMING2:
2481 		*out_reg = mmMC_SEQ_MISC_TIMING2_LP;
2482 		break;
2483 
2484 	case mmMC_SEQ_PMG_DVS_CMD:
2485 		*out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
2486 		break;
2487 
2488 	case mmMC_SEQ_PMG_DVS_CTL:
2489 		*out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
2490 		break;
2491 
2492 	case mmMC_SEQ_RD_CTL_D0:
2493 		*out_reg = mmMC_SEQ_RD_CTL_D0_LP;
2494 		break;
2495 
2496 	case mmMC_SEQ_RD_CTL_D1:
2497 		*out_reg = mmMC_SEQ_RD_CTL_D1_LP;
2498 		break;
2499 
2500 	case mmMC_SEQ_WR_CTL_D0:
2501 		*out_reg = mmMC_SEQ_WR_CTL_D0_LP;
2502 		break;
2503 
2504 	case mmMC_SEQ_WR_CTL_D1:
2505 		*out_reg = mmMC_SEQ_WR_CTL_D1_LP;
2506 		break;
2507 
2508 	case mmMC_PMG_CMD_EMRS:
2509 		*out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
2510 		break;
2511 
2512 	case mmMC_PMG_CMD_MRS:
2513 		*out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
2514 		break;
2515 
2516 	case mmMC_PMG_CMD_MRS1:
2517 		*out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
2518 		break;
2519 
2520 	case mmMC_SEQ_PMG_TIMING:
2521 		*out_reg = mmMC_SEQ_PMG_TIMING_LP;
2522 		break;
2523 
2524 	case mmMC_PMG_CMD_MRS2:
2525 		*out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
2526 		break;
2527 
2528 	case mmMC_SEQ_WR_CTL_2:
2529 		*out_reg = mmMC_SEQ_WR_CTL_2_LP;
2530 		break;
2531 
2532 	default:
2533 		result = false;
2534 		break;
2535 	}
2536 
2537 	return result;
2538 }
2539 
2540 static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
2541 {
2542 	uint32_t i;
2543 	uint16_t address;
2544 
2545 	for (i = 0; i < table->last; i++) {
2546 		table->mc_reg_address[i].s0 =
2547 			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
2548 			? address : table->mc_reg_address[i].s1;
2549 	}
2550 	return 0;
2551 }
2552 
2553 static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
2554 					struct ci_mc_reg_table *ni_table)
2555 {
2556 	uint8_t i, j;
2557 
2558 	PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2559 		"Invalid VramInfo table.", return -EINVAL);
2560 	PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
2561 		"Invalid VramInfo table.", return -EINVAL);
2562 
2563 	for (i = 0; i < table->last; i++)
2564 		ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
2565 
2566 	ni_table->last = table->last;
2567 
2568 	for (i = 0; i < table->num_entries; i++) {
2569 		ni_table->mc_reg_table_entry[i].mclk_max =
2570 			table->mc_reg_table_entry[i].mclk_max;
2571 		for (j = 0; j < table->last; j++) {
2572 			ni_table->mc_reg_table_entry[i].mc_data[j] =
2573 				table->mc_reg_table_entry[i].mc_data[j];
2574 		}
2575 	}
2576 
2577 	ni_table->num_entries = table->num_entries;
2578 
2579 	return 0;
2580 }
2581 
2582 static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr,
2583 					struct ci_mc_reg_table *table)
2584 {
2585 	uint8_t i, j, k;
2586 	uint32_t temp_reg;
2587 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2588 
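	/*
	 * MC_SEQ_MISC1 and MC_SEQ_RESERVE_M are expanded into the MRS/EMRS
	 * command registers derived from them; the synthesized entries are
	 * appended after the original list (j starts at table->last).
	 */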
2589 	for (i = 0, j = table->last; i < table->last; i++) {
2590 		PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2591 			"Invalid VramInfo table.", return -EINVAL);
2592 
2593 		switch (table->mc_reg_address[i].s1) {
2594 
2595 		case mmMC_SEQ_MISC1:
2596 			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
2597 			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
2598 			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
2599 			for (k = 0; k < table->num_entries; k++) {
2600 				table->mc_reg_table_entry[k].mc_data[j] =
2601 					((temp_reg & 0xffff0000)) |
2602 					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
2603 			}
2604 			j++;
2605 
2606 			PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2607 				"Invalid VramInfo table.", return -EINVAL);
2608 			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
2609 			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
2610 			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
2611 			for (k = 0; k < table->num_entries; k++) {
2612 				table->mc_reg_table_entry[k].mc_data[j] =
2613 					(temp_reg & 0xffff0000) |
2614 					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2615 
2616 				if (!data->is_memory_gddr5)
2617 					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
2618 			}
2619 			j++;
2620 
2621 			if (!data->is_memory_gddr5) {
2622 				PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2623 					"Invalid VramInfo table.", return -EINVAL);
2624 				table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
2625 				table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
2626 				for (k = 0; k < table->num_entries; k++) {
2627 					table->mc_reg_table_entry[k].mc_data[j] =
2628 						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
2629 				}
2630 				j++;
2631 			}
2632 
2633 			break;
2634 
2635 		case mmMC_SEQ_RESERVE_M:
2636 			temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
2637 			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
2638 			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
2639 			for (k = 0; k < table->num_entries; k++) {
2640 				table->mc_reg_table_entry[k].mc_data[j] =
2641 					(temp_reg & 0xffff0000) |
2642 					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2643 			}
2644 			j++;
2645 			break;
2646 
2647 		default:
2648 			break;
2649 		}
2650 
2651 	}
2652 
2653 	table->last = j;
2654 
2655 	return 0;
2656 }
2657 
2658 static int ci_set_valid_flag(struct ci_mc_reg_table *table)
2659 {
2660 	uint8_t i, j;
2661 
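	/* Flag a register for per-level programming only if its value differs between consecutive entries. */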
2662 	for (i = 0; i < table->last; i++) {
2663 		for (j = 1; j < table->num_entries; j++) {
2664 			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
2665 				table->mc_reg_table_entry[j].mc_data[i]) {
2666 				table->validflag |= (1 << i);
2667 				break;
2668 			}
2669 		}
2670 	}
2671 
2672 	return 0;
2673 }
2674 
2675 static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
2676 {
2677 	int result;
2678 	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2679 	pp_atomctrl_mc_reg_table *table;
2680 	struct ci_mc_reg_table *ni_table = &smu_data->mc_reg_table;
2681 	uint8_t module_index = ci_get_memory_module_index(hwmgr);
2682 
2683 	table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
2684 
2685 	if (NULL == table)
2686 		return -ENOMEM;
2687 
2688 	/* Program additional LP registers that are no longer programmed by VBIOS */
2689 	cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
2690 	cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
2691 	cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
2692 	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
2693 	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
2694 	cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
2695 	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
2696 	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
2697 	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
2698 	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
2699 	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
2700 	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
2701 	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
2702 	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
2703 	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
2704 	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
2705 	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
2706 	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
2707 	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
2708 	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
2709 
2710 	result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
2711 
2712 	if (0 == result)
2713 		result = ci_copy_vbios_smc_reg_table(table, ni_table);
2714 
2715 	if (0 == result) {
2716 		ci_set_s0_mc_reg_index(ni_table);
2717 		result = ci_set_mc_special_registers(hwmgr, ni_table);
2718 	}
2719 
2720 	if (0 == result)
2721 		ci_set_valid_flag(ni_table);
2722 
2723 	kfree(table);
2724 
2725 	return result;
2726 }
2727 
2728 static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
2729 {
2730 	return ci_is_smc_ram_running(hwmgr);
2731 }
2732 
2733 static int ci_smu_init(struct pp_hwmgr *hwmgr)
2734 {
2735 	struct ci_smumgr *ci_priv;
2736 
2737 	ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL);
2738 
2739 	if (ci_priv == NULL)
2740 		return -ENOMEM;
2741 
2742 	hwmgr->smu_backend = ci_priv;
2743 
2744 	return 0;
2745 }
2746 
2747 static int ci_smu_fini(struct pp_hwmgr *hwmgr)
2748 {
2749 	kfree(hwmgr->smu_backend);
2750 	hwmgr->smu_backend = NULL;
2751 	return 0;
2752 }
2753 
2754 static int ci_start_smu(struct pp_hwmgr *hwmgr)
2755 {
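	/* Nothing to do: the SMC is brought up from ci_process_firmware_header() and ci_init_smc_table() instead. */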
2756 	return 0;
2757 }
2758 
2759 static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
2760 				void *profile_setting)
2761 {
2762 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2763 	struct ci_smumgr *smu_data = (struct ci_smumgr *)
2764 			(hwmgr->smu_backend);
2765 	struct profile_mode_setting *setting;
2766 	struct SMU7_Discrete_GraphicsLevel *levels =
2767 			smu_data->smc_state_table.GraphicsLevel;
2768 	uint32_t array = smu_data->dpm_table_start +
2769 			offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2770 
2771 	uint32_t mclk_array = smu_data->dpm_table_start +
2772 			offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
2773 	struct SMU7_Discrete_MemoryLevel *mclk_levels =
2774 			smu_data->smc_state_table.MemoryLevel;
2775 	uint32_t i;
2776 	uint32_t offset, up_hyst_offset, down_hyst_offset, clk_activity_offset, tmp;
2777 
2778 	if (profile_setting == NULL)
2779 		return -EINVAL;
2780 
2781 	setting = (struct profile_mode_setting *)profile_setting;
2782 
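	/*
	 * Each field is patched with a read-modify-write of the aligned
	 * 32-bit word that contains it (offset & ~0x3), since SMC RAM is
	 * accessed a word at a time through the indirect interface.
	 */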
2783 	if (setting->bupdate_sclk) {
2784 		if (!data->sclk_dpm_key_disabled)
2785 			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
2786 		for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
2787 			if (levels[i].ActivityLevel !=
2788 				cpu_to_be16(setting->sclk_activity)) {
2789 				levels[i].ActivityLevel = cpu_to_be16(setting->sclk_activity);
2790 
2791 				clk_activity_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
2792 						+ offsetof(SMU7_Discrete_GraphicsLevel, ActivityLevel);
2793 				offset = clk_activity_offset & ~0x3;
2794 				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2795 				tmp = phm_set_field_to_u32(clk_activity_offset, tmp, levels[i].ActivityLevel, sizeof(uint16_t));
2796 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2797 
2798 			}
2799 			if (levels[i].UpH != setting->sclk_up_hyst ||
2800 				levels[i].DownH != setting->sclk_down_hyst) {
2801 				levels[i].UpH = setting->sclk_up_hyst;
2802 				levels[i].DownH = setting->sclk_down_hyst;
2803 				up_hyst_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
2804 						+ offsetof(SMU7_Discrete_GraphicsLevel, UpH);
2805 				down_hyst_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
2806 						+ offsetof(SMU7_Discrete_GraphicsLevel, DownH);
2807 				offset = up_hyst_offset & ~0x3;
2808 				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2809 				tmp = phm_set_field_to_u32(up_hyst_offset, tmp, levels[i].UpH, sizeof(uint8_t));
2810 				tmp = phm_set_field_to_u32(down_hyst_offset, tmp, levels[i].DownH, sizeof(uint8_t));
2811 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2812 			}
2813 		}
2814 		if (!data->sclk_dpm_key_disabled)
2815 			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
2816 	}
2817 
2818 	if (setting->bupdate_mclk) {
2819 		if (!data->mclk_dpm_key_disabled)
2820 			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
2821 		for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
2822 			if (mclk_levels[i].ActivityLevel !=
2823 				cpu_to_be16(setting->mclk_activity)) {
2824 				mclk_levels[i].ActivityLevel = cpu_to_be16(setting->mclk_activity);
2825 
2826 				clk_activity_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
2827 						+ offsetof(SMU7_Discrete_MemoryLevel, ActivityLevel);
2828 				offset = clk_activity_offset & ~0x3;
2829 				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2830 				tmp = phm_set_field_to_u32(clk_activity_offset, tmp, mclk_levels[i].ActivityLevel, sizeof(uint16_t));
2831 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2832 
2833 			}
2834 			if (mclk_levels[i].UpH != setting->mclk_up_hyst ||
2835 				mclk_levels[i].DownH != setting->mclk_down_hyst) {
2836 				mclk_levels[i].UpH = setting->mclk_up_hyst;
2837 				mclk_levels[i].DownH = setting->mclk_down_hyst;
2838 				up_hyst_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
2839 						+ offsetof(SMU7_Discrete_MemoryLevel, UpH);
2840 				down_hyst_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
2841 						+ offsetof(SMU7_Discrete_MemoryLevel, DownH);
2842 				offset = up_hyst_offset & ~0x3;
2843 				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2844 				tmp = phm_set_field_to_u32(up_hyst_offset, tmp, mclk_levels[i].UpH, sizeof(uint8_t));
2845 				tmp = phm_set_field_to_u32(down_hyst_offset, tmp, mclk_levels[i].DownH, sizeof(uint8_t));
2846 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2847 			}
2848 		}
2849 		if (!data->mclk_dpm_key_disabled)
2850 			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
2851 	}
2852 	return 0;
2853 }
2854 
2855 static int ci_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
2856 {
2857 	struct amdgpu_device *adev = hwmgr->adev;
2858 	struct smu7_hwmgr *data = hwmgr->backend;
2859 	struct ci_smumgr *smu_data = hwmgr->smu_backend;
2860 	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
2861 			hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
2862 	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
2863 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
2864 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
2865 					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
2866 	uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
2867 						hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
2868 	int32_t i;
2869 
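	/* With UVD DPM in use (or an empty table) boot from level 0; otherwise pin the boot level to the highest entry. */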
2870 	if (PP_CAP(PHM_PlatformCaps_UVDDPM) || uvd_table->count <= 0)
2871 		smu_data->smc_state_table.UvdBootLevel = 0;
2872 	else
2873 		smu_data->smc_state_table.UvdBootLevel = uvd_table->count - 1;
2874 
2875 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
2876 				UvdBootLevel, smu_data->smc_state_table.UvdBootLevel);
2877 
2878 	data->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
2879 
2880 	for (i = uvd_table->count - 1; i >= 0; i--) {
2881 		if (uvd_table->entries[i].v <= max_vddc)
2882 			data->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
2883 		if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_UVDDPM))
2884 			break;
2885 	}
2886 	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
2887 				data->dpm_level_enable_mask.uvd_dpm_enable_mask,
2888 				NULL);
2889 
2890 	return 0;
2891 }
2892 
2893 static int ci_update_vce_smc_table(struct pp_hwmgr *hwmgr)
2894 {
2895 	struct amdgpu_device *adev = hwmgr->adev;
2896 	struct smu7_hwmgr *data = hwmgr->backend;
2897 	struct phm_vce_clock_voltage_dependency_table *vce_table =
2898 			hwmgr->dyn_state.vce_clock_voltage_dependency_table;
2899 	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
2900 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
2901 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
2902 					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
2903 	uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
2904 						hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
2905 	int32_t i;
2906 
2907 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
2908 				VceBootLevel, 0); /* temporarily hard-coded to level 0; VCE can set a minimum evclk */
2909 
2910 	data->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
2911 
2912 	for (i = vce_table->count - 1; i >= 0; i--) {
2913 		if (vce_table->entries[i].v <= max_vddc)
2914 			data->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
2915 		if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_VCEDPM))
2916 			break;
2917 	}
2918 	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
2919 				data->dpm_level_enable_mask.vce_dpm_enable_mask,
2920 				NULL);
2921 
2922 	return 0;
2923 }
2924 
2925 static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
2926 {
2927 	switch (type) {
2928 	case SMU_UVD_TABLE:
2929 		ci_update_uvd_smc_table(hwmgr);
2930 		break;
2931 	case SMU_VCE_TABLE:
2932 		ci_update_vce_smc_table(hwmgr);
2933 		break;
2934 	default:
2935 		break;
2936 	}
2937 	return 0;
2938 }
2939 
2940 static void ci_reset_smc(struct pp_hwmgr *hwmgr)
2941 {
2942 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
2943 				  SMC_SYSCON_RESET_CNTL,
2944 				  rst_reg, 1);
2945 }
2946 
2947 
2948 static void ci_stop_smc_clock(struct pp_hwmgr *hwmgr)
2949 {
2950 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
2951 				  SMC_SYSCON_CLOCK_CNTL_0,
2952 				  ck_disable, 1);
2953 }
2954 
2955 static int ci_stop_smc(struct pp_hwmgr *hwmgr)
2956 {
2957 	ci_reset_smc(hwmgr);
2958 	ci_stop_smc_clock(hwmgr);
2959 
2960 	return 0;
2961 }
2962 
2963 const struct pp_smumgr_func ci_smu_funcs = {
2964 	.name = "ci_smu",
2965 	.smu_init = ci_smu_init,
2966 	.smu_fini = ci_smu_fini,
2967 	.start_smu = ci_start_smu,
2968 	.check_fw_load_finish = NULL,
2969 	.request_smu_load_fw = NULL,
2970 	.request_smu_load_specific_fw = NULL,
2971 	.send_msg_to_smc = ci_send_msg_to_smc,
2972 	.send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter,
2973 	.get_argument = smu7_get_argument,
2974 	.download_pptable_settings = NULL,
2975 	.upload_pptable_settings = NULL,
2976 	.get_offsetof = ci_get_offsetof,
2977 	.process_firmware_header = ci_process_firmware_header,
2978 	.init_smc_table = ci_init_smc_table,
2979 	.update_sclk_threshold = ci_update_sclk_threshold,
2980 	.thermal_setup_fan_table = ci_thermal_setup_fan_table,
2981 	.populate_all_graphic_levels = ci_populate_all_graphic_levels,
2982 	.populate_all_memory_levels = ci_populate_all_memory_levels,
2983 	.get_mac_definition = ci_get_mac_definition,
2984 	.initialize_mc_reg_table = ci_initialize_mc_reg_table,
2985 	.is_dpm_running = ci_is_dpm_running,
2986 	.update_dpm_settings = ci_update_dpm_settings,
2987 	.update_smc_table = ci_update_smc_table,
2988 	.stop_smc = ci_stop_smc,
2989 };
2990