xref: /linux/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c (revision df9c299371054cb725eef730fd0f1d0fe2ed6bb0)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "pp_debug.h"
24 #include <linux/module.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include "atom.h"
28 #include "ppatomctrl.h"
29 #include "atombios.h"
30 #include "cgs_common.h"
31 
32 #define MEM_ID_MASK           0xff000000
33 #define MEM_ID_SHIFT          24
34 #define CLOCK_RANGE_MASK      0x00ffffff
35 #define CLOCK_RANGE_SHIFT     0
36 #define LOW_NIBBLE_MASK       0xf
37 #define DATA_EQU_PREV         0
38 #define DATA_FROM_TABLE       4
39 
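/*
 * The VBIOS may carry any of the VoltageObjectInfo table revisions below;
 * overlaying them in a union lets a single pointer be cast to whichever
 * layout the table actually uses.
 */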
40 union voltage_object_info {
41 	struct _ATOM_VOLTAGE_OBJECT_INFO v1;
42 	struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
43 	struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3;
44 };
45 
46 static int atomctrl_retrieve_ac_timing(
47 		uint8_t index,
48 		ATOM_INIT_REG_BLOCK *reg_block,
49 		pp_atomctrl_mc_reg_table *table)
50 {
51 	uint32_t i, j;
52 	uint8_t tmem_id;
53 	ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
54 		((uint8_t *)reg_block + (2 * sizeof(uint16_t)) + le16_to_cpu(reg_block->usRegIndexTblSize));
55 
56 	uint8_t num_ranges = 0;
57 
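	/*
	 * Each AC timing data block begins with a 32-bit word: bits 31:24
	 * hold the memory module ID and bits 23:0 the maximum memory clock
	 * of the range; END_OF_REG_DATA_BLOCK terminates the list.
	 */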
58 	while (*(uint32_t *)reg_data != END_OF_REG_DATA_BLOCK &&
59 			num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES) {
60 		tmem_id = (uint8_t)((*(uint32_t *)reg_data & MEM_ID_MASK) >> MEM_ID_SHIFT);
61 
62 		if (index == tmem_id) {
63 			table->mc_reg_table_entry[num_ranges].mclk_max =
64 				(uint32_t)((*(uint32_t *)reg_data & CLOCK_RANGE_MASK) >>
65 						CLOCK_RANGE_SHIFT);
66 
67 			for (i = 0, j = 1; i < table->last; i++) {
68 				if ((table->mc_reg_address[i].uc_pre_reg_data &
69 							LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
70 					table->mc_reg_table_entry[num_ranges].mc_data[i] =
71 						(uint32_t)*((uint32_t *)reg_data + j);
72 					j++;
73 				} else if ((table->mc_reg_address[i].uc_pre_reg_data &
74 							LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
75 					if (i)
76 						table->mc_reg_table_entry[num_ranges].mc_data[i] =
77 							table->mc_reg_table_entry[num_ranges].mc_data[i-1];
78 				}
79 			}
80 			num_ranges++;
81 		}
82 
83 		reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
84 			((uint8_t *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize));
85 	}
86 
87 	PP_ASSERT_WITH_CODE((*(uint32_t *)reg_data == END_OF_REG_DATA_BLOCK),
88 			"Invalid VramInfo table.", return -1);
89 	table->num_entries = num_ranges;
90 
91 	return 0;
92 }
93 
94 /**
95  * atomctrl_set_mc_reg_address_table - Get the memory clock AC timing register indices from the VBIOS table
96  * (the VBIOS marks the end of these registers by setting bit 6 of ucPreRegDataLength)
97  * @reg_block: the address of the ATOM_INIT_REG_BLOCK
98  * @table: the address of the MCRegTable
99  * Return: 0 on success, -1 if the VramInfo table is invalid
100  */
101 static int atomctrl_set_mc_reg_address_table(
102 		ATOM_INIT_REG_BLOCK *reg_block,
103 		pp_atomctrl_mc_reg_table *table)
104 {
105 	uint8_t i = 0;
106 	uint8_t num_entries = (uint8_t)((le16_to_cpu(reg_block->usRegIndexTblSize))
107 			/ sizeof(ATOM_INIT_REG_INDEX_FORMAT));
108 	ATOM_INIT_REG_INDEX_FORMAT *format = &reg_block->asRegIndexBuf[0];
109 
110 	num_entries--;        /* subtract 1 data end mark entry */
111 
112 	PP_ASSERT_WITH_CODE((num_entries <= VBIOS_MC_REGISTER_ARRAY_SIZE),
113 			"Invalid VramInfo table.", return -1);
114 
115 	/* ucPreRegDataLength bit6 = 1 is the end of memory clock AC timing registers */
116 	while ((!(format->ucPreRegDataLength & ACCESS_PLACEHOLDER)) &&
117 			(i < num_entries)) {
118 		table->mc_reg_address[i].s1 =
119 			(uint16_t)(le16_to_cpu(format->usRegIndex));
120 		table->mc_reg_address[i].uc_pre_reg_data =
121 			format->ucPreRegDataLength;
122 
123 		i++;
124 		format = (ATOM_INIT_REG_INDEX_FORMAT *)
125 			((uint8_t *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
126 	}
127 
128 	table->last = i;
129 	return 0;
130 }
131 
132 int atomctrl_initialize_mc_reg_table(
133 		struct pp_hwmgr *hwmgr,
134 		uint8_t module_index,
135 		pp_atomctrl_mc_reg_table *table)
136 {
137 	ATOM_VRAM_INFO_HEADER_V2_1 *vram_info;
138 	ATOM_INIT_REG_BLOCK *reg_block;
139 	int result = 0;
140 	u8 frev, crev;
141 	u16 size;
142 
143 	vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *)
144 		smu_atom_get_data_table(hwmgr->adev,
145 				GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
146 	if (!vram_info) {
147 		pr_err("Could not retrieve the VramInfo table!");
148 		return -EINVAL;
149 	}
150 
151 	if (module_index >= vram_info->ucNumOfVRAMModule) {
152 		pr_err("Invalid VramInfo table.");
153 		result = -1;
154 	} else if (vram_info->sHeader.ucTableFormatRevision < 2) {
155 		pr_err("Invalid VramInfo table.");
156 		result = -1;
157 	}
158 
159 	if (0 == result) {
160 		reg_block = (ATOM_INIT_REG_BLOCK *)
161 			((uint8_t *)vram_info + le16_to_cpu(vram_info->usMemClkPatchTblOffset));
162 		result = atomctrl_set_mc_reg_address_table(reg_block, table);
163 	}
164 
165 	if (0 == result) {
166 		result = atomctrl_retrieve_ac_timing(module_index,
167 					reg_block, table);
168 	}
169 
170 	return result;
171 }
172 
173 int atomctrl_initialize_mc_reg_table_v2_2(
174 		struct pp_hwmgr *hwmgr,
175 		uint8_t module_index,
176 		pp_atomctrl_mc_reg_table *table)
177 {
178 	ATOM_VRAM_INFO_HEADER_V2_2 *vram_info;
179 	ATOM_INIT_REG_BLOCK *reg_block;
180 	int result = 0;
181 	u8 frev, crev;
182 	u16 size;
183 
184 	vram_info = (ATOM_VRAM_INFO_HEADER_V2_2 *)
185 		smu_atom_get_data_table(hwmgr->adev,
186 				GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
187 	if (!vram_info) {
188 		pr_err("Could not retrieve the VramInfo table!");
189 		return -EINVAL;
190 	}
191 
192 	if (module_index >= vram_info->ucNumOfVRAMModule) {
193 		pr_err("Invalid VramInfo table.");
194 		result = -1;
195 	} else if (vram_info->sHeader.ucTableFormatRevision < 2) {
196 		pr_err("Invalid VramInfo table.");
197 		result = -1;
198 	}
199 
200 	if (0 == result) {
201 		reg_block = (ATOM_INIT_REG_BLOCK *)
202 			((uint8_t *)vram_info + le16_to_cpu(vram_info->usMemClkPatchTblOffset));
203 		result = atomctrl_set_mc_reg_address_table(reg_block, table);
204 	}
205 
206 	if (0 == result) {
207 		result = atomctrl_retrieve_ac_timing(module_index,
208 					reg_block, table);
209 	}
210 
211 	return result;
212 }
213 
214 /*
215  * Set DRAM timings based on engine clock and memory clock.
216  */
217 int atomctrl_set_engine_dram_timings_rv770(
218 		struct pp_hwmgr *hwmgr,
219 		uint32_t engine_clock,
220 		uint32_t memory_clock)
221 {
222 	struct amdgpu_device *adev = hwmgr->adev;
223 
224 	SET_ENGINE_CLOCK_PS_ALLOCATION engine_clock_parameters;
225 
226 	/* Both clocks are in 10 kHz units; COMPUTE_ENGINE_PLL_PARAM is packed into bits 31:24. */
227 	engine_clock_parameters.ulTargetEngineClock =
228 		cpu_to_le32((engine_clock & SET_CLOCK_FREQ_MASK) |
229 			    ((COMPUTE_ENGINE_PLL_PARAM << 24)));
230 
231 	/* Also in 10 kHz units. */
232 	engine_clock_parameters.sReserved.ulClock =
233 		cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK);
234 
235 	return amdgpu_atom_execute_table(adev->mode_info.atom_context,
236 			GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
237 			(uint32_t *)&engine_clock_parameters, sizeof(engine_clock_parameters));
238 }
239 
240 /*
241  * Private function to get the VoltageObjectInfo table address.
242  * WARNING: The table returned by this function is in
243  * dynamically allocated memory.
244  * The caller has to release it by calling kfree.
245  */
246 static ATOM_VOLTAGE_OBJECT_INFO *get_voltage_info_table(void *device)
247 {
248 	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
249 	u8 frev, crev;
250 	u16 size;
251 	union voltage_object_info *voltage_info;
252 
253 	voltage_info = (union voltage_object_info *)
254 		smu_atom_get_data_table(device, index,
255 			&size, &frev, &crev);
256 
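	/* All union members alias the same address; the caller casts to the revision it expects. */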
257 	if (voltage_info != NULL)
258 		return (ATOM_VOLTAGE_OBJECT_INFO *) &(voltage_info->v3);
259 	else
260 		return NULL;
261 }
262 
263 static const ATOM_VOLTAGE_OBJECT_V3 *atomctrl_lookup_voltage_type_v3(
264 		const ATOM_VOLTAGE_OBJECT_INFO_V3_1 * voltage_object_info_table,
265 		uint8_t voltage_type, uint8_t voltage_mode)
266 {
267 	unsigned int size = le16_to_cpu(voltage_object_info_table->sHeader.usStructureSize);
268 	unsigned int offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
269 	uint8_t *start = (uint8_t *)voltage_object_info_table;
270 
271 	while (offset < size) {
272 		const ATOM_VOLTAGE_OBJECT_V3 *voltage_object =
273 			(const ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);
274 
275 		if (voltage_type == voltage_object->asGpioVoltageObj.sHeader.ucVoltageType &&
276 			voltage_mode == voltage_object->asGpioVoltageObj.sHeader.ucVoltageMode)
277 			return voltage_object;
278 
279 		offset += le16_to_cpu(voltage_object->asGpioVoltageObj.sHeader.usSize);
280 	}
281 
282 	return NULL;
283 }
284 
285 /**
286  * atomctrl_get_memory_pll_dividers_si - Compute the memory clock PLL dividers (SI)
287  *
288  * @hwmgr:           input parameter: pointer to HwMgr
289  * @clock_value:     input parameter: memory clock
290  * @mpll_param:      output parameter: memory clock parameters
291  * @strobe_mode:     input parameter: 1 for strobe mode, 0 for performance mode
292  */
293 int atomctrl_get_memory_pll_dividers_si(
294 		struct pp_hwmgr *hwmgr,
295 		uint32_t clock_value,
296 		pp_atomctrl_memory_clock_param *mpll_param,
297 		bool strobe_mode)
298 {
299 	struct amdgpu_device *adev = hwmgr->adev;
300 	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters;
301 	int result;
302 
303 	mpll_parameters.ulClock = cpu_to_le32(clock_value);
304 	mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0);
305 
306 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
307 		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
308 		(uint32_t *)&mpll_parameters, sizeof(mpll_parameters));
309 
310 	if (0 == result) {
311 		mpll_param->mpll_fb_divider.clk_frac =
312 			le16_to_cpu(mpll_parameters.ulFbDiv.usFbDivFrac);
313 		mpll_param->mpll_fb_divider.cl_kf =
314 			le16_to_cpu(mpll_parameters.ulFbDiv.usFbDiv);
315 		mpll_param->mpll_post_divider =
316 			(uint32_t)mpll_parameters.ucPostDiv;
317 		mpll_param->vco_mode =
318 			(uint32_t)(mpll_parameters.ucPllCntlFlag &
319 					MPLL_CNTL_FLAG_VCO_MODE_MASK);
320 		mpll_param->yclk_sel =
321 			(uint32_t)((mpll_parameters.ucPllCntlFlag &
322 						MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0);
323 		mpll_param->qdr =
324 			(uint32_t)((mpll_parameters.ucPllCntlFlag &
325 						MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0);
326 		mpll_param->half_rate =
327 			(uint32_t)((mpll_parameters.ucPllCntlFlag &
328 						MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0);
329 		mpll_param->dll_speed =
330 			(uint32_t)(mpll_parameters.ucDllSpeed);
331 		mpll_param->bw_ctrl =
332 			(uint32_t)(mpll_parameters.ucBWCntl);
333 	}
334 
335 	return result;
336 }
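
/*
 * Illustrative sketch (not part of the driver): an SMU7-style caller is
 * assumed to use the dividers roughly like this when programming an MCLK
 * level (the register name and local variables below are only examples).
 *
 *	pp_atomctrl_memory_clock_param mpll_param;
 *	int ret;
 *
 *	ret = atomctrl_get_memory_pll_dividers_si(hwmgr, memory_clock,
 *						   &mpll_param, strobe_mode);
 *	if (!ret) {
 *		// e.g. feed mpll_param.mpll_fb_divider.cl_kf,
 *		// mpll_param.mpll_post_divider and mpll_param.dll_speed
 *		// into the MPLL function control registers.
 *	}
 */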
337 
338 /**
339  * atomctrl_get_memory_pll_dividers_vi - Compute the memory clock PLL dividers (VI)
340  *
341  * @hwmgr:                 input parameter: pointer to HwMgr
342  * @clock_value:           input parameter: memory clock
343  * @mpll_param:            output parameter: memory clock parameters
344  */
345 int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
346 		uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param)
347 {
348 	struct amdgpu_device *adev = hwmgr->adev;
349 	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters;
350 	int result;
351 
352 	mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
353 
354 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
355 			GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
356 			(uint32_t *)&mpll_parameters, sizeof(mpll_parameters));
357 
358 	if (!result)
359 		mpll_param->mpll_post_divider =
360 				(uint32_t)mpll_parameters.ulClock.ucPostDiv;
361 
362 	return result;
363 }
364 
365 int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr,
366 					uint32_t clock_value,
367 					pp_atomctrl_memory_clock_param_ai *mpll_param)
368 {
369 	struct amdgpu_device *adev = hwmgr->adev;
370 	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 mpll_parameters = {{0}, 0, 0};
371 	int result;
372 
373 	mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
374 
375 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
376 			GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
377 			(uint32_t *)&mpll_parameters, sizeof(mpll_parameters));
378 
379 	/* VEGAM's MPLL takes some time to finish computing */
380 	udelay(10);
381 
382 	if (!result) {
383 		mpll_param->ulMclk_fcw_int =
384 			le16_to_cpu(mpll_parameters.usMclk_fcw_int);
385 		mpll_param->ulMclk_fcw_frac =
386 			le16_to_cpu(mpll_parameters.usMclk_fcw_frac);
387 		mpll_param->ulClock =
388 			le32_to_cpu(mpll_parameters.ulClock.ulClock);
389 		mpll_param->ulPostDiv = mpll_parameters.ulClock.ucPostDiv;
390 	}
391 
392 	return result;
393 }
394 
395 int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
396 					  uint32_t clock_value,
397 					  pp_atomctrl_clock_dividers_kong *dividers)
398 {
399 	struct amdgpu_device *adev = hwmgr->adev;
400 	COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters;
401 	int result;
402 
403 	pll_parameters.ulClock = cpu_to_le32(clock_value);
404 
405 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
406 		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
407 		(uint32_t *)&pll_parameters, sizeof(pll_parameters));
408 
409 	if (0 == result) {
410 		dividers->pll_post_divider = pll_parameters.ucPostDiv;
411 		dividers->real_clock = le32_to_cpu(pll_parameters.ulClock);
412 	}
413 
414 	return result;
415 }
416 
417 int atomctrl_get_engine_pll_dividers_vi(
418 		struct pp_hwmgr *hwmgr,
419 		uint32_t clock_value,
420 		pp_atomctrl_clock_dividers_vi *dividers)
421 {
422 	struct amdgpu_device *adev = hwmgr->adev;
423 	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
424 	int result;
425 
426 	pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
427 	pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
428 
429 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
430 		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
431 		(uint32_t *)&pll_patameters, sizeof(pll_patameters));
432 
433 	if (0 == result) {
434 		dividers->pll_post_divider =
435 			pll_patameters.ulClock.ucPostDiv;
436 		dividers->real_clock =
437 			le32_to_cpu(pll_patameters.ulClock.ulClock);
438 
439 		dividers->ul_fb_div.ul_fb_div_frac =
440 			le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac);
441 		dividers->ul_fb_div.ul_fb_div =
442 			le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv);
443 
444 		dividers->uc_pll_ref_div =
445 			pll_patameters.ucPllRefDiv;
446 		dividers->uc_pll_post_div =
447 			pll_patameters.ucPllPostDiv;
448 		dividers->uc_pll_cntl_flag =
449 			pll_patameters.ucPllCntlFlag;
450 	}
451 
452 	return result;
453 }
454 
455 int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
456 		uint32_t clock_value,
457 		pp_atomctrl_clock_dividers_ai *dividers)
458 {
459 	struct amdgpu_device *adev = hwmgr->adev;
460 	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters;
461 	int result;
462 
463 	pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
464 	pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
465 
466 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
467 		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
468 		(uint32_t *)&pll_patameters, sizeof(pll_patameters));
469 
470 	if (0 == result) {
471 		dividers->usSclk_fcw_frac     = le16_to_cpu(pll_patameters.usSclk_fcw_frac);
472 		dividers->usSclk_fcw_int      = le16_to_cpu(pll_patameters.usSclk_fcw_int);
473 		dividers->ucSclkPostDiv       = pll_patameters.ucSclkPostDiv;
474 		dividers->ucSclkVcoMode       = pll_patameters.ucSclkVcoMode;
475 		dividers->ucSclkPllRange      = pll_patameters.ucSclkPllRange;
476 		dividers->ucSscEnable         = pll_patameters.ucSscEnable;
477 		dividers->usSsc_fcw1_frac     = le16_to_cpu(pll_patameters.usSsc_fcw1_frac);
478 		dividers->usSsc_fcw1_int      = le16_to_cpu(pll_patameters.usSsc_fcw1_int);
479 		dividers->usPcc_fcw_int       = le16_to_cpu(pll_patameters.usPcc_fcw_int);
480 		dividers->usSsc_fcw_slew_frac = le16_to_cpu(pll_patameters.usSsc_fcw_slew_frac);
481 		dividers->usPcc_fcw_slew_frac = le16_to_cpu(pll_patameters.usPcc_fcw_slew_frac);
482 	}
483 	return result;
484 }
485 
486 int atomctrl_get_dfs_pll_dividers_vi(
487 		struct pp_hwmgr *hwmgr,
488 		uint32_t clock_value,
489 		pp_atomctrl_clock_dividers_vi *dividers)
490 {
491 	struct amdgpu_device *adev = hwmgr->adev;
492 	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
493 	int result;
494 
495 	pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
496 	pll_patameters.ulClock.ucPostDiv =
497 		COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK;
498 
499 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
500 		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
501 		(uint32_t *)&pll_patameters, sizeof(pll_patameters));
502 
503 	if (0 == result) {
504 		dividers->pll_post_divider =
505 			pll_patameters.ulClock.ucPostDiv;
506 		dividers->real_clock =
507 			le32_to_cpu(pll_patameters.ulClock.ulClock);
508 
509 		dividers->ul_fb_div.ul_fb_div_frac =
510 			le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac);
511 		dividers->ul_fb_div.ul_fb_div =
512 			le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv);
513 
514 		dividers->uc_pll_ref_div =
515 			pll_patameters.ucPllRefDiv;
516 		dividers->uc_pll_post_div =
517 			pll_patameters.ucPllPostDiv;
518 		dividers->uc_pll_cntl_flag =
519 			pll_patameters.ucPllCntlFlag;
520 	}
521 
522 	return result;
523 }
524 
525 /*
526  * Get the reference clock in 10 kHz units (defaults to 2700, i.e. 27 MHz, when the FirmwareInfo table is unavailable)
527  */
528 uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr)
529 {
530 	ATOM_FIRMWARE_INFO *fw_info;
531 	u8 frev, crev;
532 	u16 size;
533 	uint32_t clock;
534 
535 	fw_info = (ATOM_FIRMWARE_INFO *)
536 		smu_atom_get_data_table(hwmgr->adev,
537 			GetIndexIntoMasterTable(DATA, FirmwareInfo),
538 			&size, &frev, &crev);
539 
540 	if (fw_info == NULL)
541 		clock = 2700;
542 	else
543 		clock = (uint32_t)(le16_to_cpu(fw_info->usReferenceClock));
544 
545 	return clock;
546 }
547 
548 /*
549  * Returns true if the given voltage type is controlled by GPIO pins.
550  * voltage_type is one of SET_VOLTAGE_TYPE_ASIC_VDDC,
551  * SET_VOLTAGE_TYPE_ASIC_MVDDC, or SET_VOLTAGE_TYPE_ASIC_MVDDQ.
552  * voltage_mode is one of ATOM_SET_VOLTAGE or ATOM_SET_VOLTAGE_PHASE.
553  */
554 bool atomctrl_is_voltage_controlled_by_gpio_v3(
555 		struct pp_hwmgr *hwmgr,
556 		uint8_t voltage_type,
557 		uint8_t voltage_mode)
558 {
559 	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
560 		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
561 	bool ret;
562 
563 	PP_ASSERT_WITH_CODE((NULL != voltage_info),
564 			"Could not find Voltage Table in BIOS.", return false;);
565 
566 	ret = (NULL != atomctrl_lookup_voltage_type_v3
567 			(voltage_info, voltage_type, voltage_mode)) ? true : false;
568 
569 	return ret;
570 }
571 
572 int atomctrl_get_voltage_table_v3(
573 		struct pp_hwmgr *hwmgr,
574 		uint8_t voltage_type,
575 		uint8_t voltage_mode,
576 		pp_atomctrl_voltage_table *voltage_table)
577 {
578 	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
579 		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
580 	const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;
581 	unsigned int i;
582 
583 	PP_ASSERT_WITH_CODE((NULL != voltage_info),
584 			"Could not find Voltage Table in BIOS.", return -1;);
585 
586 	voltage_object = atomctrl_lookup_voltage_type_v3
587 		(voltage_info, voltage_type, voltage_mode);
588 
589 	if (voltage_object == NULL)
590 		return -1;
591 
592 	PP_ASSERT_WITH_CODE(
593 			(voltage_object->asGpioVoltageObj.ucGpioEntryNum <=
594 			PP_ATOMCTRL_MAX_VOLTAGE_ENTRIES),
595 			"Too many voltage entries!",
596 			return -1;
597 			);
598 
599 	for (i = 0; i < voltage_object->asGpioVoltageObj.ucGpioEntryNum; i++) {
600 		voltage_table->entries[i].value =
601 			le16_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue);
602 		voltage_table->entries[i].smio_low =
603 			le32_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId);
604 	}
605 
606 	voltage_table->mask_low    =
607 		le32_to_cpu(voltage_object->asGpioVoltageObj.ulGpioMaskVal);
608 	voltage_table->count      =
609 		voltage_object->asGpioVoltageObj.ucGpioEntryNum;
610 	voltage_table->phase_delay =
611 		voltage_object->asGpioVoltageObj.ucPhaseDelay;
612 
613 	return 0;
614 }
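
/*
 * Illustrative sketch (not part of the driver): a hwmgr is assumed to pair
 * the two helpers above, e.g. for a GPIO-controlled VDDCI rail
 * (data->vddci_voltage_table is a hypothetical destination).
 *
 *	if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
 *			VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
 *		atomctrl_get_voltage_table_v3(hwmgr,
 *				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
 *				&data->vddci_voltage_table);
 */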
615 
616 static bool atomctrl_lookup_gpio_pin(
617 		ATOM_GPIO_PIN_LUT * gpio_lookup_table,
618 		const uint32_t pinId,
619 		pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
620 {
621 	unsigned int size = le16_to_cpu(gpio_lookup_table->sHeader.usStructureSize);
622 	unsigned int offset = offsetof(ATOM_GPIO_PIN_LUT, asGPIO_Pin[0]);
623 	uint8_t *start = (uint8_t *)gpio_lookup_table;
624 
625 	while (offset < size) {
626 		const ATOM_GPIO_PIN_ASSIGNMENT *pin_assignment =
627 			(const ATOM_GPIO_PIN_ASSIGNMENT *)(start + offset);
628 
629 		if (pinId == pin_assignment->ucGPIO_ID) {
630 			gpio_pin_assignment->uc_gpio_pin_bit_shift =
631 				pin_assignment->ucGpioPinBitShift;
632 			gpio_pin_assignment->us_gpio_pin_aindex =
633 				le16_to_cpu(pin_assignment->usGpioPin_AIndex);
634 			return true;
635 		}
636 
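		/*
		 * ucGPIO_ID is the last byte of an ATOM_GPIO_PIN_ASSIGNMENT
		 * entry, so its offset plus one byte is the stride to the
		 * next entry.
		 */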
637 		offset += offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1;
638 	}
639 
640 	return false;
641 }
642 
643 /*
644  * Private function to get the GPIO Pin LUT table address.
645  * WARNING: The table returned by this function is in
646  * dynamically allocated memory.
647  * The caller has to release it by calling kfree.
648  */
649 static ATOM_GPIO_PIN_LUT *get_gpio_lookup_table(void *device)
650 {
651 	u8 frev, crev;
652 	u16 size;
653 	void *table_address;
654 
655 	table_address = (ATOM_GPIO_PIN_LUT *)
656 		smu_atom_get_data_table(device,
657 				GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT),
658 				&size, &frev, &crev);
659 
660 	PP_ASSERT_WITH_CODE((NULL != table_address),
661 			"Error retrieving BIOS Table Address!", return NULL;);
662 
663 	return (ATOM_GPIO_PIN_LUT *)table_address;
664 }
665 
666 /*
667  * Returns true if the given pin ID is found in the lookup table.
668  */
669 bool atomctrl_get_pp_assign_pin(
670 		struct pp_hwmgr *hwmgr,
671 		const uint32_t pinId,
672 		pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
673 {
674 	bool bRet = false;
675 	ATOM_GPIO_PIN_LUT *gpio_lookup_table =
676 		get_gpio_lookup_table(hwmgr->adev);
677 
678 	PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table),
679 			"Could not find GPIO lookup Table in BIOS.", return false);
680 
681 	bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId,
682 		gpio_pin_assignment);
683 
684 	return bRet;
685 }
686 
687 /**
688  * atomctrl_get_voltage_evv_on_sclk - gets the voltage via a call to the ATOM COMMAND table.
689  * @hwmgr:              input: pointer to hwManager
690  * @voltage_type:       input: type of EVV voltage, VDDC or VDDGFX
691  * @sclk:               input: in 10 kHz units. DPM state SCLK frequency,
692  *		         which is defined in the PPTable SCLK/VDDC dependency
693  *			 table associated with this virtual_voltage_Id
694  * @virtual_voltage_Id: input: voltage ID which matches the per-voltage DPM state: 0xff01, 0xff02.. 0xff08
695  * @voltage:            output: real voltage level in units of mV
696  */
697 int atomctrl_get_voltage_evv_on_sclk(
698 		struct pp_hwmgr *hwmgr,
699 		uint8_t voltage_type,
700 		uint32_t sclk, uint16_t virtual_voltage_Id,
701 		uint16_t *voltage)
702 {
703 	struct amdgpu_device *adev = hwmgr->adev;
704 	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
705 	int result;
706 
707 	get_voltage_info_param_space.ucVoltageType   =
708 		voltage_type;
709 	get_voltage_info_param_space.ucVoltageMode   =
710 		ATOM_GET_VOLTAGE_EVV_VOLTAGE;
711 	get_voltage_info_param_space.usVoltageLevel  =
712 		cpu_to_le16(virtual_voltage_Id);
713 	get_voltage_info_param_space.ulSCLKFreq      =
714 		cpu_to_le32(sclk);
715 
716 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
717 			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
718 			(uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space));
719 
720 	*voltage = result ? 0 :
721 			le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
722 				(&get_voltage_info_param_space))->usVoltageLevel);
723 
724 	return result;
725 }
726 
727 /**
728  * atomctrl_get_voltage_evv - gets the voltage via a call to the ATOM COMMAND table.
729  * @hwmgr:              input: pointer to hwManager
730  * @virtual_voltage_id: input: voltage ID which matches the per-voltage DPM state: 0xff01, 0xff02.. 0xff08
731  * @voltage:            output: real voltage level in units of mV
732  */
733 int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
734 			     uint16_t virtual_voltage_id,
735 			     uint16_t *voltage)
736 {
737 	struct amdgpu_device *adev = hwmgr->adev;
738 	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
739 	int result;
740 	int entry_id;
741 
742 	/* search for the leakage voltage ID 0xff01 ~ 0xff08 and sclk */
743 	for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) {
744 		if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].v == virtual_voltage_id) {
745 			/* found */
746 			break;
747 		}
748 	}
749 
750 	if (entry_id >= hwmgr->dyn_state.vddc_dependency_on_sclk->count) {
751 		pr_debug("Can't find requested voltage id in vddc_dependency_on_sclk table!\n");
752 		return -EINVAL;
753 	}
754 
755 	get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC;
756 	get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
757 	get_voltage_info_param_space.usVoltageLevel = virtual_voltage_id;
758 	get_voltage_info_param_space.ulSCLKFreq =
759 		cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk);
760 
761 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
762 			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
763 			(uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space));
764 
765 	if (0 != result)
766 		return result;
767 
768 	*voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
769 				(&get_voltage_info_param_space))->usVoltageLevel);
770 
771 	return result;
772 }
773 
774 /*
775  * Get the MPLL reference clock in 10 kHz units
776  */
777 uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr)
778 {
779 	ATOM_COMMON_TABLE_HEADER *fw_info;
780 	uint32_t clock;
781 	u8 frev, crev;
782 	u16 size;
783 
784 	fw_info = (ATOM_COMMON_TABLE_HEADER *)
785 		smu_atom_get_data_table(hwmgr->adev,
786 				GetIndexIntoMasterTable(DATA, FirmwareInfo),
787 				&size, &frev, &crev);
788 
789 	if (fw_info == NULL)
790 		clock = 2700;
791 	else {
792 		if ((fw_info->ucTableFormatRevision == 2) &&
793 			(le16_to_cpu(fw_info->usStructureSize) >= sizeof(ATOM_FIRMWARE_INFO_V2_1))) {
794 			ATOM_FIRMWARE_INFO_V2_1 *fwInfo_2_1 =
795 				(ATOM_FIRMWARE_INFO_V2_1 *)fw_info;
796 			clock = (uint32_t)(le16_to_cpu(fwInfo_2_1->usMemoryReferenceClock));
797 		} else {
798 			ATOM_FIRMWARE_INFO *fwInfo_0_0 =
799 				(ATOM_FIRMWARE_INFO *)fw_info;
800 			clock = (uint32_t)(le16_to_cpu(fwInfo_0_0->usReferenceClock));
801 		}
802 	}
803 
804 	return clock;
805 }
806 
807 /*
808  * Get the asic internal spread spectrum table
809  */
810 static ATOM_ASIC_INTERNAL_SS_INFO *asic_internal_ss_get_ss_table(void *device)
811 {
812 	ATOM_ASIC_INTERNAL_SS_INFO *table = NULL;
813 	u8 frev, crev;
814 	u16 size;
815 
816 	table = (ATOM_ASIC_INTERNAL_SS_INFO *)
817 		smu_atom_get_data_table(device,
818 			GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info),
819 			&size, &frev, &crev);
820 
821 	return table;
822 }
823 
824 bool atomctrl_is_asic_internal_ss_supported(struct pp_hwmgr *hwmgr)
825 {
826 	ATOM_ASIC_INTERNAL_SS_INFO *table =
827 		asic_internal_ss_get_ss_table(hwmgr->adev);
828 
829 	if (table)
830 		return true;
831 	else
832 		return false;
833 }
834 
835 /*
836  * Get the asic internal spread spectrum assignment; returns 0 if a matching entry is found, 1 otherwise
837  */
838 static int asic_internal_ss_get_ss_assignment(struct pp_hwmgr *hwmgr,
839 		const uint8_t clockSource,
840 		const uint32_t clockSpeed,
841 		pp_atomctrl_internal_ss_info *ssEntry)
842 {
843 	ATOM_ASIC_INTERNAL_SS_INFO *table;
844 	ATOM_ASIC_SS_ASSIGNMENT *ssInfo;
845 	int entry_found = 0;
846 
847 	memset(ssEntry, 0x00, sizeof(pp_atomctrl_internal_ss_info));
848 
849 	table = asic_internal_ss_get_ss_table(hwmgr->adev);
850 
851 	if (NULL == table)
852 		return -1;
853 
854 	ssInfo = &table->asSpreadSpectrum[0];
855 
856 	while (((uint8_t *)ssInfo - (uint8_t *)table) <
857 		le16_to_cpu(table->sHeader.usStructureSize)) {
858 		if ((clockSource == ssInfo->ucClockIndication) &&
859 			((uint32_t)clockSpeed <= le32_to_cpu(ssInfo->ulTargetClockRange))) {
860 			entry_found = 1;
861 			break;
862 		}
863 
864 		ssInfo = (ATOM_ASIC_SS_ASSIGNMENT *)((uint8_t *)ssInfo +
865 				sizeof(ATOM_ASIC_SS_ASSIGNMENT));
866 	}
867 
868 	if (entry_found) {
869 		ssEntry->speed_spectrum_percentage =
870 			le16_to_cpu(ssInfo->usSpreadSpectrumPercentage);
871 		ssEntry->speed_spectrum_rate = le16_to_cpu(ssInfo->usSpreadRateInKhz);
872 
873 		if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) &&
874 			(GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) ||
875 			(GET_DATA_TABLE_MAJOR_REVISION(table) == 3)) {
876 			ssEntry->speed_spectrum_rate /= 100;
877 		}
878 
879 		switch (ssInfo->ucSpreadSpectrumMode) {
880 		case 0:
881 			ssEntry->speed_spectrum_mode =
882 				pp_atomctrl_spread_spectrum_mode_down;
883 			break;
884 		case 1:
885 			ssEntry->speed_spectrum_mode =
886 				pp_atomctrl_spread_spectrum_mode_center;
887 			break;
888 		default:
889 			ssEntry->speed_spectrum_mode =
890 				pp_atomctrl_spread_spectrum_mode_down;
891 			break;
892 		}
893 	}
894 
895 	return entry_found ? 0 : 1;
896 }
897 
898 /*
899  * Get the memory clock spread spectrum info
900  */
901 int atomctrl_get_memory_clock_spread_spectrum(
902 		struct pp_hwmgr *hwmgr,
903 		const uint32_t memory_clock,
904 		pp_atomctrl_internal_ss_info *ssInfo)
905 {
906 	return asic_internal_ss_get_ss_assignment(hwmgr,
907 			ASIC_INTERNAL_MEMORY_SS, memory_clock, ssInfo);
908 }
909 
910 /*
911  * Get the engine clock spread spectrum info
912  */
913 int atomctrl_get_engine_clock_spread_spectrum(
914 		struct pp_hwmgr *hwmgr,
915 		const uint32_t engine_clock,
916 		pp_atomctrl_internal_ss_info *ssInfo)
917 {
918 	return asic_internal_ss_get_ss_assignment(hwmgr,
919 			ASIC_INTERNAL_ENGINE_SS, engine_clock, ssInfo);
920 }
921 
922 int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index,
923 		uint16_t end_index, uint32_t *efuse)
924 {
925 	struct amdgpu_device *adev = hwmgr->adev;
926 	uint32_t mask;
927 	int result;
928 	READ_EFUSE_VALUE_PARAMETER efuse_param;
929 
930 	if ((end_index - start_index) == 31)
931 		mask = 0xFFFFFFFF;
932 	else
933 		mask = (1 << ((end_index - start_index) + 1)) - 1;
934 
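	/*
	 * The efuse is addressed in 32-bit words: usEfuseIndex is the byte
	 * offset of the dword holding start_index, ucBitShift is the bit
	 * position within that dword, and ucBitLength is the field width.
	 */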
935 	efuse_param.sEfuse.usEfuseIndex = cpu_to_le16((start_index / 32) * 4);
936 	efuse_param.sEfuse.ucBitShift = (uint8_t)
937 			(start_index - ((start_index / 32) * 32));
938 	efuse_param.sEfuse.ucBitLength  = (uint8_t)
939 			((end_index - start_index) + 1);
940 
941 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
942 			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
943 			(uint32_t *)&efuse_param, sizeof(efuse_param));
944 	*efuse = result ? 0 : le32_to_cpu(efuse_param.ulEfuseValue) & mask;
945 
946 	return result;
947 }
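
/*
 * Illustrative sketch (not part of the driver): reading a hypothetical
 * 8-bit fuse field that starts at bit 24 would look like
 *
 *	uint32_t efuse = 0;
 *
 *	atomctrl_read_efuse(hwmgr, 24, 31, &efuse);
 *	// efuse now holds bits 24..31, masked to the requested width
 */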
948 
949 int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
950 			      uint8_t level)
951 {
952 	struct amdgpu_device *adev = hwmgr->adev;
953 	DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters;
954 	int result;
955 
956 	memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq =
957 		memory_clock & SET_CLOCK_FREQ_MASK;
958 	memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag =
959 		ADJUST_MC_SETTING_PARAM;
960 	memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level;
961 
962 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
963 		 GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
964 		(uint32_t *)&memory_clock_parameters, sizeof(memory_clock_parameters));
965 
966 	return result;
967 }
968 
969 int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
970 				uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage)
971 {
972 	struct amdgpu_device *adev = hwmgr->adev;
973 	int result;
974 	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3 get_voltage_info_param_space;
975 
976 	get_voltage_info_param_space.ucVoltageType = voltage_type;
977 	get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
978 	get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_Id);
979 	get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk);
980 
981 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
982 			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
983 			(uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space));
984 
985 	*voltage = result ? 0 :
986 		le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel);
987 
988 	return result;
989 }
990 
991 int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table)
992 {
993 
994 	int i;
995 	u8 frev, crev;
996 	u16 size;
997 
998 	ATOM_SMU_INFO_V2_1 *psmu_info =
999 		(ATOM_SMU_INFO_V2_1 *)smu_atom_get_data_table(hwmgr->adev,
1000 			GetIndexIntoMasterTable(DATA, SMU_Info),
1001 			&size, &frev, &crev);
1002 
1003 	if (!psmu_info)
1004 		return -EINVAL;
1005 
1006 	for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
1007 		table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
1008 		table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv;
1009 		table->entry[i].usFcw_pcc =
1010 			le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc);
1011 		table->entry[i].usFcw_trans_upper =
1012 			le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper);
1013 		table->entry[i].usRcw_trans_lower =
1014 			le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower);
1015 	}
1016 
1017 	return 0;
1018 }
1019 
1020 int atomctrl_get_vddc_shared_railinfo(struct pp_hwmgr *hwmgr, uint8_t *shared_rail)
1021 {
1022 	ATOM_SMU_INFO_V2_1 *psmu_info =
1023 		(ATOM_SMU_INFO_V2_1 *)smu_atom_get_data_table(hwmgr->adev,
1024 			GetIndexIntoMasterTable(DATA, SMU_Info),
1025 			NULL, NULL, NULL);
1026 	if (!psmu_info)
1027 		return -1;
1028 
1029 	*shared_rail = psmu_info->ucSharePowerSource;
1030 
1031 	return 0;
1032 }
1033 
1034 int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
1035 				  struct pp_atom_ctrl__avfs_parameters *param)
1036 {
1037 	ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL;
1038 
1039 	if (param == NULL)
1040 		return -EINVAL;
1041 
1042 	profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *)
1043 			smu_atom_get_data_table(hwmgr->adev,
1044 					GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
1045 					NULL, NULL, NULL);
1046 	if (!profile)
1047 		return -1;
1048 
1049 	param->ulAVFS_meanNsigma_Acontant0 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant0);
1050 	param->ulAVFS_meanNsigma_Acontant1 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant1);
1051 	param->ulAVFS_meanNsigma_Acontant2 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant2);
1052 	param->usAVFS_meanNsigma_DC_tol_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_DC_tol_sigma);
1053 	param->usAVFS_meanNsigma_Platform_mean = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_mean);
1054 	param->usAVFS_meanNsigma_Platform_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_sigma);
1055 	param->ulGB_VDROOP_TABLE_CKSOFF_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a0);
1056 	param->ulGB_VDROOP_TABLE_CKSOFF_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a1);
1057 	param->ulGB_VDROOP_TABLE_CKSOFF_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a2);
1058 	param->ulGB_VDROOP_TABLE_CKSON_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a0);
1059 	param->ulGB_VDROOP_TABLE_CKSON_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a1);
1060 	param->ulGB_VDROOP_TABLE_CKSON_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a2);
1061 	param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
1062 	param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2);
1063 	param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b);
1064 	param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_m1);
1065 	param->usAVFSGB_FUSE_TABLE_CKSON_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSON_m2);
1066 	param->ulAVFSGB_FUSE_TABLE_CKSON_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_b);
1067 	param->usMaxVoltage_0_25mv = le16_to_cpu(profile->usMaxVoltage_0_25mv);
1068 	param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF;
1069 	param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON;
1070 	param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF;
1071 	param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON;
1072 	param->usPSM_Age_ComFactor = le16_to_cpu(profile->usPSM_Age_ComFactor);
1073 	param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage;
1074 
1075 	return 0;
1076 }
1077 
1078 int  atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
1079 				uint8_t *svd_gpio_id, uint8_t *svc_gpio_id,
1080 				uint16_t *load_line)
1081 {
1082 	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
1083 		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
1084 
1085 	const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;
1086 
1087 	PP_ASSERT_WITH_CODE((NULL != voltage_info),
1088 			"Could not find Voltage Table in BIOS.", return -EINVAL);
1089 
1090 	voltage_object = atomctrl_lookup_voltage_type_v3
1091 		(voltage_info, voltage_type,  VOLTAGE_OBJ_SVID2);
1092 
	PP_ASSERT_WITH_CODE((voltage_object != NULL),
			"Could not find SVID2 voltage object in BIOS.", return -EINVAL);

1093 	*svd_gpio_id = voltage_object->asSVID2Obj.ucSVDGpioId;
1094 	*svc_gpio_id = voltage_object->asSVID2Obj.ucSVCGpioId;
1095 	*load_line = voltage_object->asSVID2Obj.usLoadLine_PSI;
1096 
1097 	return 0;
1098 }
1099 
1100 int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id)
1101 {
1102 	struct amdgpu_device *adev = hwmgr->adev;
1103 	SET_VOLTAGE_PS_ALLOCATION allocation;
1104 	SET_VOLTAGE_PARAMETERS_V1_3 *voltage_parameters =
1105 			(SET_VOLTAGE_PARAMETERS_V1_3 *)&allocation.sASICSetVoltage;
1106 	int result;
1107 
1108 	voltage_parameters->ucVoltageMode = ATOM_GET_LEAKAGE_ID;
1109 
1110 	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
1111 			GetIndexIntoMasterTable(COMMAND, SetVoltage),
1112 			(uint32_t *)voltage_parameters, sizeof(*voltage_parameters));
1113 
1114 	*virtual_voltage_id = voltage_parameters->usVoltageLevel;
1115 
1116 	return result;
1117 }
1118 
1119 int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
1120 					uint16_t *vddc, uint16_t *vddci,
1121 					uint16_t virtual_voltage_id,
1122 					uint16_t efuse_voltage_id)
1123 {
1124 	int i, j;
1125 	int ix;
1126 	u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
1127 	ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
1128 
1129 	*vddc = 0;
1130 	*vddci = 0;
1131 
1132 	ix = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
1133 
1134 	profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
1135 			smu_atom_get_data_table(hwmgr->adev,
1136 					ix,
1137 					NULL, NULL, NULL);
1138 	if (!profile)
1139 		return -EINVAL;
1140 
1141 	if ((profile->asHeader.ucTableFormatRevision >= 2) &&
1142 		(profile->asHeader.ucTableContentRevision >= 1) &&
1143 		(profile->asHeader.usStructureSize >= sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))) {
1144 		leakage_bin = (u16 *)((char *)profile + profile->usLeakageBinArrayOffset);
1145 		vddc_id_buf = (u16 *)((char *)profile + profile->usElbVDDC_IdArrayOffset);
1146 		vddc_buf = (u16 *)((char *)profile + profile->usElbVDDC_LevelArrayOffset);
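		/*
		 * The level array is laid out leakage-bin-major: the VDDC
		 * level for virtual id i in bin j is at j * ucElbVDDC_Num + i.
		 */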
1147 		if (profile->ucElbVDDC_Num > 0) {
1148 			for (i = 0; i < profile->ucElbVDDC_Num; i++) {
1149 				if (vddc_id_buf[i] == virtual_voltage_id) {
1150 					for (j = 0; j < profile->ucLeakageBinNum; j++) {
1151 						if (efuse_voltage_id <= leakage_bin[j]) {
1152 							*vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
1153 							break;
1154 						}
1155 					}
1156 					break;
1157 				}
1158 			}
1159 		}
1160 
1161 		vddci_id_buf = (u16 *)((char *)profile + profile->usElbVDDCI_IdArrayOffset);
1162 		vddci_buf   = (u16 *)((char *)profile + profile->usElbVDDCI_LevelArrayOffset);
1163 		if (profile->ucElbVDDCI_Num > 0) {
1164 			for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
1165 				if (vddci_id_buf[i] == virtual_voltage_id) {
1166 					for (j = 0; j < profile->ucLeakageBinNum; j++) {
1167 						if (efuse_voltage_id <= leakage_bin[j]) {
1168 							*vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
1169 							break;
1170 						}
1171 					}
1172 					break;
1173 				}
1174 			}
1175 		}
1176 	}
1177 
1178 	return 0;
1179 }
1180 
1181 void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
1182 							uint32_t *min_vddc)
1183 {
1184 	void *profile;
1185 
1186 	profile = smu_atom_get_data_table(hwmgr->adev,
1187 					GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
1188 					NULL, NULL, NULL);
1189 
1190 	if (profile) {
1191 		switch (hwmgr->chip_id) {
1192 		case CHIP_TONGA:
1193 		case CHIP_FIJI:
1194 			*max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc) / 4;
1195 			*min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc) / 4;
1196 			return;
1197 		case CHIP_POLARIS11:
1198 		case CHIP_POLARIS10:
1199 		case CHIP_POLARIS12:
1200 			*max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc) / 100;
1201 			*min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc) / 100;
1202 			return;
1203 		default:
1204 			break;
1205 		}
1206 	}
1207 	*max_vddc = 0;
1208 	*min_vddc = 0;
1209 }
1210 
1211 int atomctrl_get_edc_hilo_leakage_offset_table(struct pp_hwmgr *hwmgr,
1212 					       AtomCtrl_HiLoLeakageOffsetTable *table)
1213 {
1214 	ATOM_GFX_INFO_V2_3 *gfxinfo = smu_atom_get_data_table(hwmgr->adev,
1215 					GetIndexIntoMasterTable(DATA, GFX_Info),
1216 					NULL, NULL, NULL);
1217 	if (!gfxinfo)
1218 		return -ENOENT;
1219 
1220 	table->usHiLoLeakageThreshold = gfxinfo->usHiLoLeakageThreshold;
1221 	table->usEdcDidtLoDpm7TableOffset = gfxinfo->usEdcDidtLoDpm7TableOffset;
1222 	table->usEdcDidtHiDpm7TableOffset = gfxinfo->usEdcDidtHiDpm7TableOffset;
1223 
1224 	return 0;
1225 }
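
/*
 * Illustrative sketch (not part of the driver): the EDC helpers are assumed
 * to be used as a pair, first fetching the offsets and then the leakage
 * table they point to (offsets/didt_table are just example locals).
 *
 *	AtomCtrl_HiLoLeakageOffsetTable offsets;
 *	AtomCtrl_EDCLeakgeTable didt_table;
 *
 *	if (!atomctrl_get_edc_hilo_leakage_offset_table(hwmgr, &offsets) &&
 *	    offsets.usEdcDidtLoDpm7TableOffset)
 *		atomctrl_get_edc_leakage_table(hwmgr, &didt_table,
 *				offsets.usEdcDidtLoDpm7TableOffset);
 */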
1226 
1227 static AtomCtrl_EDCLeakgeTable *get_edc_leakage_table(struct pp_hwmgr *hwmgr,
1228 						      uint16_t offset)
1229 {
1230 	void *table_address;
1231 	char *temp;
1232 
1233 	table_address = smu_atom_get_data_table(hwmgr->adev,
1234 			GetIndexIntoMasterTable(DATA, GFX_Info),
1235 			NULL, NULL, NULL);
1236 	if (!table_address)
1237 		return NULL;
1238 
1239 	/* The requested leakage table lives at the given offset within GFX_Info. */
1240 	temp = (char *)table_address + offset;
1241 
1242 	return (AtomCtrl_EDCLeakgeTable *)temp;
1243 }
1244 
1245 int atomctrl_get_edc_leakage_table(struct pp_hwmgr *hwmgr,
1246 				   AtomCtrl_EDCLeakgeTable *table,
1247 				   uint16_t offset)
1248 {
1249 	uint32_t length, i;
1250 	AtomCtrl_EDCLeakgeTable *leakage_table =
1251 		get_edc_leakage_table(hwmgr, offset);
1252 
1253 	if (!leakage_table)
1254 		return -ENOENT;
1255 
1256 	length = sizeof(leakage_table->DIDT_REG) /
1257 		 sizeof(leakage_table->DIDT_REG[0]);
1258 	for (i = 0; i < length; i++)
1259 		table->DIDT_REG[i] = leakage_table->DIDT_REG[i];
1260 
1261 	return 0;
1262 }
1263