1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23 #include "pp_debug.h"
24 #include <linux/module.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include "atom.h"
28 #include "ppatomctrl.h"
29 #include "atombios.h"
30 #include "cgs_common.h"
31
32 #define MEM_ID_MASK 0xff000000
33 #define MEM_ID_SHIFT 24
34 #define CLOCK_RANGE_MASK 0x00ffffff
35 #define CLOCK_RANGE_SHIFT 0
36 #define LOW_NIBBLE_MASK 0xf
37 #define DATA_EQU_PREV 0
38 #define DATA_FROM_TABLE 4
39
40 union voltage_object_info {
41 struct _ATOM_VOLTAGE_OBJECT_INFO v1;
42 struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
43 struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3;
44 };
45
/*
 * atomctrl_retrieve_ac_timing - copy AC timing register values for one
 * memory module out of the VBIOS memory-setting data blocks.
 * @index:     memory module ID to look for
 * @reg_block: ATOM_INIT_REG_BLOCK whose data blocks follow its register
 *             index table
 * @table:     destination MC register table (mc_reg_address[] and
 *             table->last must already be filled in by
 *             atomctrl_set_mc_reg_address_table())
 *
 * Each data block begins with a 32-bit header word: the high byte is the
 * memory module ID, the low 24 bits the maximum memory clock for that
 * timing set.  The block list is terminated by END_OF_REG_DATA_BLOCK.
 *
 * Return: 0 on success, -1 if the terminator was not reached within
 * VBIOS_MAX_AC_TIMING_ENTRIES blocks (malformed table).
 */
static int atomctrl_retrieve_ac_timing(
		uint8_t index,
		ATOM_INIT_REG_BLOCK *reg_block,
		pp_atomctrl_mc_reg_table *table)
{
	uint32_t i, j;
	uint8_t tmem_id;
	/* data blocks start right after the two u16 sizes and the index table */
	ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
		((uint8_t *)reg_block + (2 * sizeof(uint16_t)) + le16_to_cpu(reg_block->usRegIndexTblSize));

	uint8_t num_ranges = 0;

	while (*(uint32_t *)reg_data != END_OF_REG_DATA_BLOCK &&
			num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES) {
		/* module ID lives in the top byte of the block header word */
		tmem_id = (uint8_t)((*(uint32_t *)reg_data & MEM_ID_MASK) >> MEM_ID_SHIFT);

		if (index == tmem_id) {
			table->mc_reg_table_entry[num_ranges].mclk_max =
				(uint32_t)((*(uint32_t *)reg_data & CLOCK_RANGE_MASK) >>
						CLOCK_RANGE_SHIFT);

			/*
			 * i walks the destination registers, j walks the raw
			 * data words (word 0 is the header).  DATA_FROM_TABLE
			 * entries consume a data word; DATA_EQU_PREV entries
			 * replicate the previous register's value.
			 */
			for (i = 0, j = 1; i < table->last; i++) {
				if ((table->mc_reg_address[i].uc_pre_reg_data &
							LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
					table->mc_reg_table_entry[num_ranges].mc_data[i] =
						(uint32_t)*((uint32_t *)reg_data + j);
					j++;
				} else if ((table->mc_reg_address[i].uc_pre_reg_data &
							LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
					if (i)
						table->mc_reg_table_entry[num_ranges].mc_data[i] =
							table->mc_reg_table_entry[num_ranges].mc_data[i-1];
				}
			}
			num_ranges++;
		}

		/* advance to the next fixed-size data block */
		reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
			((uint8_t *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize)) ;
	}

	PP_ASSERT_WITH_CODE((*(uint32_t *)reg_data == END_OF_REG_DATA_BLOCK),
			"Invalid VramInfo table.", return -1);
	table->num_entries = num_ranges;

	return 0;
}
93
94 /**
95 * atomctrl_set_mc_reg_address_table - Get memory clock AC timing registers index from VBIOS table
96 * VBIOS set end of memory clock AC timing registers by ucPreRegDataLength bit6 = 1
97 * @reg_block: the address ATOM_INIT_REG_BLOCK
98 * @table: the address of MCRegTable
99 * Return: 0
100 */
static int atomctrl_set_mc_reg_address_table(
		ATOM_INIT_REG_BLOCK *reg_block,
		pp_atomctrl_mc_reg_table *table)
{
	uint8_t i = 0;
	uint8_t num_entries = (uint8_t)((le16_to_cpu(reg_block->usRegIndexTblSize))
			/ sizeof(ATOM_INIT_REG_INDEX_FORMAT));
	/*
	 * Fix: the source text had been corrupted to "®_block" (an HTML
	 * entity mangling of "&reg_"); restore the address-of expression.
	 */
	ATOM_INIT_REG_INDEX_FORMAT *format = &reg_block->asRegIndexBuf[0];

	num_entries--; /* subtract 1 data end mark entry */

	PP_ASSERT_WITH_CODE((num_entries <= VBIOS_MC_REGISTER_ARRAY_SIZE),
			"Invalid VramInfo table.", return -1);

	/* ucPreRegDataLength bit6 = 1 is the end of memory clock AC timing registers */
	while ((!(format->ucPreRegDataLength & ACCESS_PLACEHOLDER)) &&
			(i < num_entries)) {
		table->mc_reg_address[i].s1 =
			(uint16_t)(le16_to_cpu(format->usRegIndex));
		table->mc_reg_address[i].uc_pre_reg_data =
			format->ucPreRegDataLength;

		i++;
		format = (ATOM_INIT_REG_INDEX_FORMAT *)
			((uint8_t *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
	}

	table->last = i;
	return 0;
}
131
/*
 * atomctrl_initialize_mc_reg_table - build the MC register table for one
 * VRAM module from the VBIOS VRAM_Info (header v2.1) data table.
 * @hwmgr:        hardware manager handle (supplies the adev used to fetch
 *                BIOS data tables)
 * @module_index: VRAM module to extract timings for
 * @table:        output MC register table
 *
 * Return: 0 on success, -1 on a missing/invalid VramInfo table or a
 * failure in the helper parsers.
 */
int atomctrl_initialize_mc_reg_table(
		struct pp_hwmgr *hwmgr,
		uint8_t module_index,
		pp_atomctrl_mc_reg_table *table)
{
	ATOM_VRAM_INFO_HEADER_V2_1 *vram_info;
	ATOM_INIT_REG_BLOCK *reg_block;
	int result = 0;
	u8 frev, crev;
	u16 size;

	vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *)
			smu_atom_get_data_table(hwmgr->adev,
			GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);

	/*
	 * Fix: smu_atom_get_data_table() may return NULL; the original
	 * code dereferenced vram_info unconditionally.
	 */
	if (!vram_info) {
		pr_err("Could not retrieve the VramInfo table!");
		result = -1;
	} else if (module_index >= vram_info->ucNumOfVRAMModule) {
		pr_err("Invalid VramInfo table.");
		result = -1;
	} else if (vram_info->sHeader.ucTableFormatRevision < 2) {
		pr_err("Invalid VramInfo table.");
		result = -1;
	}

	if (0 == result) {
		reg_block = (ATOM_INIT_REG_BLOCK *)
			((uint8_t *)vram_info + le16_to_cpu(vram_info->usMemClkPatchTblOffset));
		result = atomctrl_set_mc_reg_address_table(reg_block, table);
	}

	if (0 == result) {
		result = atomctrl_retrieve_ac_timing(module_index,
					reg_block, table);
	}

	return result;
}
168
/*
 * atomctrl_initialize_mc_reg_table_v2_2 - build the MC register table for
 * one VRAM module from the VBIOS VRAM_Info (header v2.2) data table.
 * @hwmgr:        hardware manager handle
 * @module_index: VRAM module to extract timings for
 * @table:        output MC register table
 *
 * Same flow as atomctrl_initialize_mc_reg_table(), just for the v2.2
 * VRAM_Info header layout.
 *
 * Return: 0 on success, -1 on a missing/invalid VramInfo table or a
 * failure in the helper parsers.
 */
int atomctrl_initialize_mc_reg_table_v2_2(
		struct pp_hwmgr *hwmgr,
		uint8_t module_index,
		pp_atomctrl_mc_reg_table *table)
{
	ATOM_VRAM_INFO_HEADER_V2_2 *vram_info;
	ATOM_INIT_REG_BLOCK *reg_block;
	int result = 0;
	u8 frev, crev;
	u16 size;

	vram_info = (ATOM_VRAM_INFO_HEADER_V2_2 *)
			smu_atom_get_data_table(hwmgr->adev,
			GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);

	/*
	 * Fix: smu_atom_get_data_table() may return NULL; the original
	 * code dereferenced vram_info unconditionally.
	 */
	if (!vram_info) {
		pr_err("Could not retrieve the VramInfo table!");
		result = -1;
	} else if (module_index >= vram_info->ucNumOfVRAMModule) {
		pr_err("Invalid VramInfo table.");
		result = -1;
	} else if (vram_info->sHeader.ucTableFormatRevision < 2) {
		pr_err("Invalid VramInfo table.");
		result = -1;
	}

	if (0 == result) {
		reg_block = (ATOM_INIT_REG_BLOCK *)
			((uint8_t *)vram_info + le16_to_cpu(vram_info->usMemClkPatchTblOffset));
		result = atomctrl_set_mc_reg_address_table(reg_block, table);
	}

	if (0 == result) {
		result = atomctrl_retrieve_ac_timing(module_index,
					reg_block, table);
	}

	return result;
}
205
206 /*
207 * Set DRAM timings based on engine clock and memory clock.
208 */
/*
 * Program DRAM timings for the given engine/memory clock pair by invoking
 * the DynamicMemorySettings ATOM command table.  Both clocks are given in
 * 10 kHz units.  Returns the status of amdgpu_atom_execute_table().
 */
int atomctrl_set_engine_dram_timings_rv770(
		struct pp_hwmgr *hwmgr,
		uint32_t engine_clock,
		uint32_t memory_clock)
{
	struct amdgpu_device *adev = hwmgr->adev;
	SET_ENGINE_CLOCK_PS_ALLOCATION engine_clock_parameters;
	uint32_t target;

	/* top byte carries the COMPUTE_ENGINE_PLL_PARAM operation flag */
	target = (engine_clock & SET_CLOCK_FREQ_MASK) |
		 (COMPUTE_ENGINE_PLL_PARAM << 24);
	engine_clock_parameters.ulTargetEngineClock = cpu_to_le32(target);

	/* memory clock rides in the reserved slot, also in 10 kHz units */
	engine_clock_parameters.sReserved.ulClock =
		cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK);

	return amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
			(uint32_t *)&engine_clock_parameters, sizeof(engine_clock_parameters));
}
231
232 /*
233 * Private Function to get the PowerPlay Table Address.
234 * WARNING: The tabled returned by this function is in
235 * dynamically allocated memory.
236 * The caller has to release if by calling kfree.
237 */
get_voltage_info_table(void * device)238 static ATOM_VOLTAGE_OBJECT_INFO *get_voltage_info_table(void *device)
239 {
240 int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
241 u8 frev, crev;
242 u16 size;
243 union voltage_object_info *voltage_info;
244
245 voltage_info = (union voltage_object_info *)
246 smu_atom_get_data_table(device, index,
247 &size, &frev, &crev);
248
249 if (voltage_info != NULL)
250 return (ATOM_VOLTAGE_OBJECT_INFO *) &(voltage_info->v3);
251 else
252 return NULL;
253 }
254
atomctrl_lookup_voltage_type_v3(const ATOM_VOLTAGE_OBJECT_INFO_V3_1 * voltage_object_info_table,uint8_t voltage_type,uint8_t voltage_mode)255 static const ATOM_VOLTAGE_OBJECT_V3 *atomctrl_lookup_voltage_type_v3(
256 const ATOM_VOLTAGE_OBJECT_INFO_V3_1 * voltage_object_info_table,
257 uint8_t voltage_type, uint8_t voltage_mode)
258 {
259 unsigned int size = le16_to_cpu(voltage_object_info_table->sHeader.usStructureSize);
260 unsigned int offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
261 uint8_t *start = (uint8_t *)voltage_object_info_table;
262
263 while (offset < size) {
264 const ATOM_VOLTAGE_OBJECT_V3 *voltage_object =
265 (const ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);
266
267 if (voltage_type == voltage_object->asGpioVoltageObj.sHeader.ucVoltageType &&
268 voltage_mode == voltage_object->asGpioVoltageObj.sHeader.ucVoltageMode)
269 return voltage_object;
270
271 offset += le16_to_cpu(voltage_object->asGpioVoltageObj.sHeader.usSize);
272 }
273
274 return NULL;
275 }
276
277 /**
278 * atomctrl_get_memory_pll_dividers_si
279 *
280 * @hwmgr: input parameter: pointer to HwMgr
281 * @clock_value: input parameter: memory clock
282 * @mpll_param: output parameter: memory clock parameters
283 * @strobe_mode: input parameter: 1 for strobe mode, 0 for performance mode
284 */
int atomctrl_get_memory_pll_dividers_si(
		struct pp_hwmgr *hwmgr,
		uint32_t clock_value,
		pp_atomctrl_memory_clock_param *mpll_param,
		bool strobe_mode)
{
	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters;
	int result;

	/* clock is in 10 kHz units; ucInputFlag = 1 selects strobe mode */
	mpll_parameters.ulClock = cpu_to_le32(clock_value);
	mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0);

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
			(uint32_t *)&mpll_parameters, sizeof(mpll_parameters));

	/* on success the VBIOS fills mpll_parameters in place; unpack it */
	if (0 == result) {
		mpll_param->mpll_fb_divider.clk_frac =
			le16_to_cpu(mpll_parameters.ulFbDiv.usFbDivFrac);
		mpll_param->mpll_fb_divider.cl_kf =
			le16_to_cpu(mpll_parameters.ulFbDiv.usFbDiv);
		mpll_param->mpll_post_divider =
			(uint32_t)mpll_parameters.ucPostDiv;
		mpll_param->vco_mode =
			(uint32_t)(mpll_parameters.ucPllCntlFlag &
					MPLL_CNTL_FLAG_VCO_MODE_MASK);
		/* remaining control-flag bits are decoded to 0/1 booleans */
		mpll_param->yclk_sel =
			(uint32_t)((mpll_parameters.ucPllCntlFlag &
					MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0);
		mpll_param->qdr =
			(uint32_t)((mpll_parameters.ucPllCntlFlag &
					MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0);
		mpll_param->half_rate =
			(uint32_t)((mpll_parameters.ucPllCntlFlag &
					MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0);
		mpll_param->dll_speed =
			(uint32_t)(mpll_parameters.ucDllSpeed);
		mpll_param->bw_ctrl =
			(uint32_t)(mpll_parameters.ucBWCntl);
	}

	return result;
}
329
330 /**
331 * atomctrl_get_memory_pll_dividers_vi
332 *
333 * @hwmgr: input parameter: pointer to HwMgr
334 * @clock_value: input parameter: memory clock
335 * @mpll_param: output parameter: memory clock parameters
336 */
int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
		uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param)
{
	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters;
	int ret;

	/* target memory clock, in 10 kHz units */
	mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
			(uint32_t *)&mpll_parameters, sizeof(mpll_parameters));
	if (ret)
		return ret;

	/* the v2.2 interface only reports the post divider back */
	mpll_param->mpll_post_divider =
		(uint32_t)mpll_parameters.ulClock.ucPostDiv;

	return 0;
}
356
int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr,
		uint32_t clock_value,
		pp_atomctrl_memory_clock_param_ai *mpll_param)
{
	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 mpll_parameters = {{0}, 0, 0};
	int ret;

	/* target memory clock, in 10 kHz units */
	mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
			(uint32_t *)&mpll_parameters, sizeof(mpll_parameters));

	/* VEGAM's mpll takes sometime to finish computing */
	udelay(10);

	if (ret)
		return ret;

	/* unpack the fractional clock word computed by the VBIOS */
	mpll_param->ulMclk_fcw_int =
		le16_to_cpu(mpll_parameters.usMclk_fcw_int);
	mpll_param->ulMclk_fcw_frac =
		le16_to_cpu(mpll_parameters.usMclk_fcw_frac);
	mpll_param->ulClock =
		le32_to_cpu(mpll_parameters.ulClock.ulClock);
	mpll_param->ulPostDiv = mpll_parameters.ulClock.ucPostDiv;

	return 0;
}
386
int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
		uint32_t clock_value,
		pp_atomctrl_clock_dividers_kong *dividers)
{
	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters;
	int ret;

	/* requested engine clock, in 10 kHz units */
	pll_parameters.ulClock = cpu_to_le32(clock_value);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
			(uint32_t *)&pll_parameters, sizeof(pll_parameters));
	if (ret)
		return ret;

	/* VBIOS returns the achieved clock and its post divider */
	dividers->pll_post_divider = pll_parameters.ucPostDiv;
	dividers->real_clock = le32_to_cpu(pll_parameters.ulClock);

	return 0;
}
408
int atomctrl_get_engine_pll_dividers_vi(
		struct pp_hwmgr *hwmgr,
		uint32_t clock_value,
		pp_atomctrl_clock_dividers_vi *dividers)
{
	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
	int result;

	/* requested clock (10 kHz units); ucPostDiv doubles as the input
	 * flag selecting an SCLK computation */
	pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
	pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
			(uint32_t *)&pll_patameters, sizeof(pll_patameters));

	/* on success the VBIOS fills the structure in place; unpack it */
	if (0 == result) {
		dividers->pll_post_divider =
			pll_patameters.ulClock.ucPostDiv;
		dividers->real_clock =
			le32_to_cpu(pll_patameters.ulClock.ulClock);

		/* feedback divider: integer and fractional parts */
		dividers->ul_fb_div.ul_fb_div_frac =
			le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac);
		dividers->ul_fb_div.ul_fb_div =
			le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv);

		dividers->uc_pll_ref_div =
			pll_patameters.ucPllRefDiv;
		dividers->uc_pll_post_div =
			pll_patameters.ucPllPostDiv;
		dividers->uc_pll_cntl_flag =
			pll_patameters.ucPllCntlFlag;
	}

	return result;
}
446
int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
		uint32_t clock_value,
		pp_atomctrl_clock_dividers_ai *dividers)
{
	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters;
	int result;

	/* requested clock (10 kHz units); ucPostDiv doubles as the input
	 * flag selecting an SCLK computation */
	pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
	pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
			(uint32_t *)&pll_patameters, sizeof(pll_patameters));

	/* copy out the v1.7 fractional-clock-word and spread-spectrum
	 * results filled in by the VBIOS */
	if (0 == result) {
		dividers->usSclk_fcw_frac = le16_to_cpu(pll_patameters.usSclk_fcw_frac);
		dividers->usSclk_fcw_int = le16_to_cpu(pll_patameters.usSclk_fcw_int);
		dividers->ucSclkPostDiv = pll_patameters.ucSclkPostDiv;
		dividers->ucSclkVcoMode = pll_patameters.ucSclkVcoMode;
		dividers->ucSclkPllRange = pll_patameters.ucSclkPllRange;
		dividers->ucSscEnable = pll_patameters.ucSscEnable;
		dividers->usSsc_fcw1_frac = le16_to_cpu(pll_patameters.usSsc_fcw1_frac);
		dividers->usSsc_fcw1_int = le16_to_cpu(pll_patameters.usSsc_fcw1_int);
		dividers->usPcc_fcw_int = le16_to_cpu(pll_patameters.usPcc_fcw_int);
		dividers->usSsc_fcw_slew_frac = le16_to_cpu(pll_patameters.usSsc_fcw_slew_frac);
		dividers->usPcc_fcw_slew_frac = le16_to_cpu(pll_patameters.usPcc_fcw_slew_frac);
	}
	return result;
}
477
/*
 * Same as atomctrl_get_engine_pll_dividers_vi() but computes dividers for
 * the default GPU clock (COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK) rather
 * than SCLK.
 */
int atomctrl_get_dfs_pll_dividers_vi(
		struct pp_hwmgr *hwmgr,
		uint32_t clock_value,
		pp_atomctrl_clock_dividers_vi *dividers)
{
	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
	int result;

	/* requested clock (10 kHz units); ucPostDiv carries the input flag */
	pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
	pll_patameters.ulClock.ucPostDiv =
		COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK;

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
			(uint32_t *)&pll_patameters, sizeof(pll_patameters));

	/* on success the VBIOS fills the structure in place; unpack it */
	if (0 == result) {
		dividers->pll_post_divider =
			pll_patameters.ulClock.ucPostDiv;
		dividers->real_clock =
			le32_to_cpu(pll_patameters.ulClock.ulClock);

		/* feedback divider: integer and fractional parts */
		dividers->ul_fb_div.ul_fb_div_frac =
			le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac);
		dividers->ul_fb_div.ul_fb_div =
			le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv);

		dividers->uc_pll_ref_div =
			pll_patameters.ucPllRefDiv;
		dividers->uc_pll_post_div =
			pll_patameters.ucPllPostDiv;
		dividers->uc_pll_cntl_flag =
			pll_patameters.ucPllCntlFlag;
	}

	return result;
}
516
517 /*
518 * Get the reference clock in 10KHz
519 */
atomctrl_get_reference_clock(struct pp_hwmgr * hwmgr)520 uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr)
521 {
522 ATOM_FIRMWARE_INFO *fw_info;
523 u8 frev, crev;
524 u16 size;
525 uint32_t clock;
526
527 fw_info = (ATOM_FIRMWARE_INFO *)
528 smu_atom_get_data_table(hwmgr->adev,
529 GetIndexIntoMasterTable(DATA, FirmwareInfo),
530 &size, &frev, &crev);
531
532 if (fw_info == NULL)
533 clock = 2700;
534 else
535 clock = (uint32_t)(le16_to_cpu(fw_info->usReferenceClock));
536
537 return clock;
538 }
539
540 /*
541 * Returns true if the given voltage type is controlled by GPIO pins.
542 * voltage_type is one of SET_VOLTAGE_TYPE_ASIC_VDDC,
543 * SET_VOLTAGE_TYPE_ASIC_MVDDC, SET_VOLTAGE_TYPE_ASIC_MVDDQ.
544 * voltage_mode is one of ATOM_SET_VOLTAGE, ATOM_SET_VOLTAGE_PHASE
545 */
atomctrl_is_voltage_controlled_by_gpio_v3(struct pp_hwmgr * hwmgr,uint8_t voltage_type,uint8_t voltage_mode)546 bool atomctrl_is_voltage_controlled_by_gpio_v3(
547 struct pp_hwmgr *hwmgr,
548 uint8_t voltage_type,
549 uint8_t voltage_mode)
550 {
551 ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
552 (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
553 bool ret;
554
555 PP_ASSERT_WITH_CODE((NULL != voltage_info),
556 "Could not find Voltage Table in BIOS.", return false;);
557
558 ret = (NULL != atomctrl_lookup_voltage_type_v3
559 (voltage_info, voltage_type, voltage_mode)) ? true : false;
560
561 return ret;
562 }
563
/*
 * atomctrl_get_voltage_table_v3 - fill a GPIO voltage table from the v3
 * VBIOS voltage object matching @voltage_type / @voltage_mode.
 * @hwmgr:         hardware manager handle
 * @voltage_type:  e.g. SET_VOLTAGE_TYPE_ASIC_VDDC
 * @voltage_mode:  e.g. ATOM_SET_VOLTAGE or ATOM_SET_VOLTAGE_PHASE
 * @voltage_table: output table (entries, count, mask, phase delay)
 *
 * Return: 0 on success, -1 if the BIOS table or a matching voltage object
 * is missing, or if the object holds more GPIO entries than
 * PP_ATOMCTRL_MAX_VOLTAGE_ENTRIES.
 */
int atomctrl_get_voltage_table_v3(
		struct pp_hwmgr *hwmgr,
		uint8_t voltage_type,
		uint8_t voltage_mode,
		pp_atomctrl_voltage_table *voltage_table)
{
	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
	const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;
	unsigned int i;

	PP_ASSERT_WITH_CODE((NULL != voltage_info),
			"Could not find Voltage Table in BIOS.", return -1;);

	voltage_object = atomctrl_lookup_voltage_type_v3
		(voltage_info, voltage_type, voltage_mode);

	if (voltage_object == NULL)
		return -1;

	/* bound the copy so we cannot overrun voltage_table->entries[] */
	PP_ASSERT_WITH_CODE(
			(voltage_object->asGpioVoltageObj.ucGpioEntryNum <=
			PP_ATOMCTRL_MAX_VOLTAGE_ENTRIES),
			"Too many voltage entries!",
			return -1;
			);

	/* copy each GPIO LUT entry: voltage in mV plus its SMIO pattern */
	for (i = 0; i < voltage_object->asGpioVoltageObj.ucGpioEntryNum; i++) {
		voltage_table->entries[i].value =
			le16_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue);
		voltage_table->entries[i].smio_low =
			le32_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId);
	}

	voltage_table->mask_low =
		le32_to_cpu(voltage_object->asGpioVoltageObj.ulGpioMaskVal);
	voltage_table->count =
		voltage_object->asGpioVoltageObj.ucGpioEntryNum;
	voltage_table->phase_delay =
		voltage_object->asGpioVoltageObj.ucPhaseDelay;

	return 0;
}
607
/*
 * atomctrl_lookup_gpio_pin - search the GPIO pin LUT for @pinId.
 * @gpio_lookup_table:   BIOS GPIO pin lookup table
 * @pinId:               GPIO identifier to find
 * @gpio_pin_assignment: filled with the pin's bit shift and register
 *                       A-index on a match
 *
 * Return: true if the pin was found and @gpio_pin_assignment filled,
 * false otherwise.
 */
static bool atomctrl_lookup_gpio_pin(
		ATOM_GPIO_PIN_LUT * gpio_lookup_table,
		const uint32_t pinId,
		pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
{
	unsigned int size = le16_to_cpu(gpio_lookup_table->sHeader.usStructureSize);
	unsigned int offset = offsetof(ATOM_GPIO_PIN_LUT, asGPIO_Pin[0]);
	uint8_t *start = (uint8_t *)gpio_lookup_table;

	while (offset < size) {
		const ATOM_GPIO_PIN_ASSIGNMENT *pin_assignment =
			(const ATOM_GPIO_PIN_ASSIGNMENT *)(start + offset);

		if (pinId == pin_assignment->ucGPIO_ID) {
			gpio_pin_assignment->uc_gpio_pin_bit_shift =
				pin_assignment->ucGpioPinBitShift;
			gpio_pin_assignment->us_gpio_pin_aindex =
				le16_to_cpu(pin_assignment->usGpioPin_AIndex);
			return true;
		}

		/*
		 * NOTE(review): the stride is ucGPIO_ID's offset plus one
		 * byte, not sizeof(ATOM_GPIO_PIN_ASSIGNMENT) — presumably
		 * the LUT packs entries up to and including the ID byte;
		 * confirm against the atombios layout before changing.
		 */
		offset += offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1;
	}

	return false;
}
634
635 /*
636 * Private Function to get the PowerPlay Table Address.
637 * WARNING: The tabled returned by this function is in
638 * dynamically allocated memory.
639 * The caller has to release if by calling kfree.
640 */
get_gpio_lookup_table(void * device)641 static ATOM_GPIO_PIN_LUT *get_gpio_lookup_table(void *device)
642 {
643 u8 frev, crev;
644 u16 size;
645 void *table_address;
646
647 table_address = (ATOM_GPIO_PIN_LUT *)
648 smu_atom_get_data_table(device,
649 GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT),
650 &size, &frev, &crev);
651
652 PP_ASSERT_WITH_CODE((NULL != table_address),
653 "Error retrieving BIOS Table Address!", return NULL;);
654
655 return (ATOM_GPIO_PIN_LUT *)table_address;
656 }
657
658 /*
659 * Returns 1 if the given pin id find in lookup table.
660 */
atomctrl_get_pp_assign_pin(struct pp_hwmgr * hwmgr,const uint32_t pinId,pp_atomctrl_gpio_pin_assignment * gpio_pin_assignment)661 bool atomctrl_get_pp_assign_pin(
662 struct pp_hwmgr *hwmgr,
663 const uint32_t pinId,
664 pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
665 {
666 bool bRet = false;
667 ATOM_GPIO_PIN_LUT *gpio_lookup_table =
668 get_gpio_lookup_table(hwmgr->adev);
669
670 PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table),
671 "Could not find GPIO lookup Table in BIOS.", return false);
672
673 bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId,
674 gpio_pin_assignment);
675
676 return bRet;
677 }
678
679 /**
680 * atomctrl_get_voltage_evv_on_sclk: gets voltage via call to ATOM COMMAND table.
681 * @hwmgr: input: pointer to hwManager
682 * @voltage_type: input: type of EVV voltage VDDC or VDDGFX
683 * @sclk: input: in 10Khz unit. DPM state SCLK frequency
684 * which is define in PPTable SCLK/VDDC dependence
685 * table associated with this virtual_voltage_Id
686 * @virtual_voltage_Id: input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 0xff08
687 * @voltage: output: real voltage level in unit of mv
688 */
int atomctrl_get_voltage_evv_on_sclk(
		struct pp_hwmgr *hwmgr,
		uint8_t voltage_type,
		uint32_t sclk, uint16_t virtual_voltage_Id,
		uint16_t *voltage)
{
	struct amdgpu_device *adev = hwmgr->adev;
	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 params;
	int result;

	/* build the GetVoltageInfo request for an EVV voltage query */
	params.ucVoltageType = voltage_type;
	params.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
	params.usVoltageLevel = cpu_to_le16(virtual_voltage_Id);
	params.ulSCLKFreq = cpu_to_le32(sclk);

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
			(uint32_t *)&params, sizeof(params));

	/* the output overlays the input buffer; 0 mV on failure */
	*voltage = result ? 0 :
		le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
				(&params))->usVoltageLevel);

	return result;
}
718
719 /**
720 * atomctrl_get_voltage_evv: gets voltage via call to ATOM COMMAND table.
721 * @hwmgr: input: pointer to hwManager
722 * @virtual_voltage_id: input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 0xff08
723 * @voltage: output: real voltage level in unit of mv
724 */
int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
			     uint16_t virtual_voltage_id,
			     uint16_t *voltage)
{
	struct amdgpu_device *adev = hwmgr->adev;
	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
	int result;
	int entry_id;

	/* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
	for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) {
		if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].v == virtual_voltage_id) {
			/* found */
			break;
		}
	}

	if (entry_id >= hwmgr->dyn_state.vddc_dependency_on_sclk->count) {
		pr_debug("Can't find requested voltage id in vddc_dependency_on_sclk table!\n");
		return -EINVAL;
	}

	get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC;
	get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
	/*
	 * Fix: usVoltageLevel was assigned without cpu_to_le16(), unlike
	 * atomctrl_get_voltage_evv_on_sclk() — a latent bug on big-endian
	 * hosts (no-op on little-endian).
	 */
	get_voltage_info_param_space.usVoltageLevel =
			cpu_to_le16(virtual_voltage_id);
	get_voltage_info_param_space.ulSCLKFreq =
			cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk);

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
			(uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space));

	if (0 != result)
		return result;

	/* the output structure overlays the input parameter buffer */
	*voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
				(&get_voltage_info_param_space))->usVoltageLevel);

	return result;
}
765
766 /*
767 * Get the mpll reference clock in 10KHz
768 */
atomctrl_get_mpll_reference_clock(struct pp_hwmgr * hwmgr)769 uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr)
770 {
771 ATOM_COMMON_TABLE_HEADER *fw_info;
772 uint32_t clock;
773 u8 frev, crev;
774 u16 size;
775
776 fw_info = (ATOM_COMMON_TABLE_HEADER *)
777 smu_atom_get_data_table(hwmgr->adev,
778 GetIndexIntoMasterTable(DATA, FirmwareInfo),
779 &size, &frev, &crev);
780
781 if (fw_info == NULL)
782 clock = 2700;
783 else {
784 if ((fw_info->ucTableFormatRevision == 2) &&
785 (le16_to_cpu(fw_info->usStructureSize) >= sizeof(ATOM_FIRMWARE_INFO_V2_1))) {
786 ATOM_FIRMWARE_INFO_V2_1 *fwInfo_2_1 =
787 (ATOM_FIRMWARE_INFO_V2_1 *)fw_info;
788 clock = (uint32_t)(le16_to_cpu(fwInfo_2_1->usMemoryReferenceClock));
789 } else {
790 ATOM_FIRMWARE_INFO *fwInfo_0_0 =
791 (ATOM_FIRMWARE_INFO *)fw_info;
792 clock = (uint32_t)(le16_to_cpu(fwInfo_0_0->usReferenceClock));
793 }
794 }
795
796 return clock;
797 }
798
799 /*
800 * Get the asic internal spread spectrum table
801 */
asic_internal_ss_get_ss_table(void * device)802 static ATOM_ASIC_INTERNAL_SS_INFO *asic_internal_ss_get_ss_table(void *device)
803 {
804 ATOM_ASIC_INTERNAL_SS_INFO *table = NULL;
805 u8 frev, crev;
806 u16 size;
807
808 table = (ATOM_ASIC_INTERNAL_SS_INFO *)
809 smu_atom_get_data_table(device,
810 GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info),
811 &size, &frev, &crev);
812
813 return table;
814 }
815
atomctrl_is_asic_internal_ss_supported(struct pp_hwmgr * hwmgr)816 bool atomctrl_is_asic_internal_ss_supported(struct pp_hwmgr *hwmgr)
817 {
818 ATOM_ASIC_INTERNAL_SS_INFO *table =
819 asic_internal_ss_get_ss_table(hwmgr->adev);
820
821 if (table)
822 return true;
823 else
824 return false;
825 }
826
827 /*
828 * Get the asic internal spread spectrum assignment
829 */
/*
 * asic_internal_ss_get_ss_asignment - find the spread-spectrum assignment
 * for a clock source at (or above) a given clock speed.
 * @hwmgr:       hardware manager handle
 * @clockSource: ASIC_INTERNAL_*_SS clock indication to match
 * @clockSpeed:  target clock; the first entry whose ulTargetClockRange is
 *               >= this value wins
 * @ssEntry:     output; zeroed first, filled only on a match
 *
 * Return: 0 if a matching entry was found, 1 if not, -1 if the SS table
 * is absent.  (Note the unusual convention: "not found" is +1, not a
 * negative errno.)
 */
static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr,
		const uint8_t clockSource,
		const uint32_t clockSpeed,
		pp_atomctrl_internal_ss_info *ssEntry)
{
	ATOM_ASIC_INTERNAL_SS_INFO *table;
	ATOM_ASIC_SS_ASSIGNMENT *ssInfo;
	int entry_found = 0;

	memset(ssEntry, 0x00, sizeof(pp_atomctrl_internal_ss_info));

	table = asic_internal_ss_get_ss_table(hwmgr->adev);

	if (NULL == table)
		return -1;

	ssInfo = &table->asSpreadSpectrum[0];

	/* walk fixed-size assignment entries up to the structure size */
	while (((uint8_t *)ssInfo - (uint8_t *)table) <
		le16_to_cpu(table->sHeader.usStructureSize)) {
		if ((clockSource == ssInfo->ucClockIndication) &&
			((uint32_t)clockSpeed <= le32_to_cpu(ssInfo->ulTargetClockRange))) {
			entry_found = 1;
			break;
		}

		ssInfo = (ATOM_ASIC_SS_ASSIGNMENT *)((uint8_t *)ssInfo +
				sizeof(ATOM_ASIC_SS_ASSIGNMENT));
	}

	if (entry_found) {
		ssEntry->speed_spectrum_percentage =
			le16_to_cpu(ssInfo->usSpreadSpectrumPercentage);
		ssEntry->speed_spectrum_rate = le16_to_cpu(ssInfo->usSpreadRateInKhz);

		/* rev >= 2.2 (or major 3) tables store the rate x100 */
		if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) &&
			(GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) ||
			(GET_DATA_TABLE_MAJOR_REVISION(table) == 3)) {
			ssEntry->speed_spectrum_rate /= 100;
		}

		/* 0 = down-spread, 1 = center-spread; anything else
		 * defaults to down-spread */
		switch (ssInfo->ucSpreadSpectrumMode) {
		case 0:
			ssEntry->speed_spectrum_mode =
				pp_atomctrl_spread_spectrum_mode_down;
			break;
		case 1:
			ssEntry->speed_spectrum_mode =
				pp_atomctrl_spread_spectrum_mode_center;
			break;
		default:
			ssEntry->speed_spectrum_mode =
				pp_atomctrl_spread_spectrum_mode_down;
			break;
		}
	}

	return entry_found ? 0 : 1;
}
889
890 /*
891 * Get the memory clock spread spectrum info
892 */
/*
 * Get the memory clock spread spectrum info.
 * Thin wrapper selecting the ASIC_INTERNAL_MEMORY_SS clock source;
 * returns 0 on match, 1 if no entry, -1 if the table is absent.
 */
int atomctrl_get_memory_clock_spread_spectrum(
		struct pp_hwmgr *hwmgr,
		const uint32_t memory_clock,
		pp_atomctrl_internal_ss_info *ssInfo)
{
	return asic_internal_ss_get_ss_asignment(hwmgr,
			ASIC_INTERNAL_MEMORY_SS, memory_clock, ssInfo);
}
901
902 /*
903 * Get the engine clock spread spectrum info
904 */
atomctrl_get_engine_clock_spread_spectrum(struct pp_hwmgr * hwmgr,const uint32_t engine_clock,pp_atomctrl_internal_ss_info * ssInfo)905 int atomctrl_get_engine_clock_spread_spectrum(
906 struct pp_hwmgr *hwmgr,
907 const uint32_t engine_clock,
908 pp_atomctrl_internal_ss_info *ssInfo)
909 {
910 return asic_internal_ss_get_ss_asignment(hwmgr,
911 ASIC_INTERNAL_ENGINE_SS, engine_clock, ssInfo);
912 }
913
/*
 * Read an efuse bit field [start_index, end_index] via the ReadEfuseValue
 * ATOM command table.
 *
 * @hwmgr:       hwmgr instance
 * @start_index: first bit of the field (absolute efuse bit index)
 * @end_index:   last bit of the field, inclusive
 * @efuse:       output; masked field value, 0 on failure
 *
 * Return: result of amdgpu_atom_execute_table() (0 on success).
 */
int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index,
		uint16_t end_index, uint32_t *efuse)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t mask;
	int result;
	READ_EFUSE_VALUE_PARAMETER efuse_param;

	/*
	 * Use >= rather than == so a span of 32 bits or more cannot reach
	 * the shift below: (1 << 32) is undefined behavior on a 32-bit int.
	 */
	if ((end_index - start_index) >= 31)
		mask = 0xFFFFFFFF;
	else
		mask = (1 << ((end_index - start_index) + 1)) - 1;

	/* Dword-aligned byte offset of the word containing start_index. */
	efuse_param.sEfuse.usEfuseIndex = cpu_to_le16((start_index / 32) * 4);
	efuse_param.sEfuse.ucBitShift = (uint8_t)
			(start_index - ((start_index / 32) * 32));
	efuse_param.sEfuse.ucBitLength  = (uint8_t)
			((end_index - start_index) + 1);

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
			(uint32_t *)&efuse_param, sizeof(efuse_param));
	*efuse = result ? 0 : le32_to_cpu(efuse_param.ulEfuseValue) & mask;

	return result;
}
940
/*
 * Program AC timing for the given memory clock / MCLK DPM level through the
 * DynamicMemorySettings ATOM command table.
 *
 * Return: result of amdgpu_atom_execute_table() (0 on success).
 */
int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
			      uint8_t level)
{
	struct amdgpu_device *adev = hwmgr->adev;
	DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 mc_param;

	mc_param.asDPMMCReg.ulClock.ulClockFreq =
		memory_clock & SET_CLOCK_FREQ_MASK;
	mc_param.asDPMMCReg.ulClock.ulComputeClockFlag = ADJUST_MC_SETTING_PARAM;
	mc_param.asDPMMCReg.ucMclkDPMState = level;

	return amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
			(uint32_t *)&mc_param, sizeof(mc_param));
}
960
/*
 * Query the EVV voltage for a given SCLK and virtual voltage id via the
 * GetVoltageInfo ATOM command table (AI variant returning a 32-bit level).
 *
 * @voltage: output; the reported voltage level, or 0 on failure.
 * Return: result of amdgpu_atom_execute_table() (0 on success).
 */
int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
		uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage)
{
	struct amdgpu_device *adev = hwmgr->adev;
	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3 evv_param;
	GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *evv_out;
	int result;

	evv_param.ucVoltageType = voltage_type;
	evv_param.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
	evv_param.usVoltageLevel = cpu_to_le16(virtual_voltage_Id);
	evv_param.ulSCLKFreq = cpu_to_le32(sclk);

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
			(uint32_t *)&evv_param, sizeof(evv_param));

	/* The interpreter overwrites the input buffer with the output struct. */
	evv_out = (GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)&evv_param;
	*voltage = result ? 0 : le32_to_cpu(evv_out->ulVoltageLevel);

	return result;
}
982
/*
 * Copy the SCLK FCW range entries from the SMU_Info BIOS table into the
 * caller-provided range table.
 *
 * @hwmgr: hwmgr instance
 * @table: output; entry[i] is filled for each BIOS-reported entry
 *
 * Return: 0 on success, -EINVAL when the SMU_Info table is missing.
 *
 * NOTE(review): @table is dereferenced without a NULL check, and the loop
 * bound ucSclkEntryNum comes straight from the BIOS with no clamp against
 * the capacity of table->entry[] — confirm callers guarantee both.
 */
int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table)
{

	int i;
	u8 frev, crev;
	u16 size;

	ATOM_SMU_INFO_V2_1 *psmu_info =
		(ATOM_SMU_INFO_V2_1 *)smu_atom_get_data_table(hwmgr->adev,
			GetIndexIntoMasterTable(DATA, SMU_Info),
			&size, &frev, &crev);

	if (!psmu_info)
		return -EINVAL;

	for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
		table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
		table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv;
		/* Despite the "uc" prefix, these BIOS fields are 16-bit LE. */
		table->entry[i].usFcw_pcc =
			le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc);
		table->entry[i].usFcw_trans_upper =
			le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper);
		table->entry[i].usRcw_trans_lower =
			le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower);
	}

	return 0;
}
1011
/*
 * Report whether VDDC shares a power rail, as described by the SMU_Info
 * BIOS table.
 *
 * @shared_rail: output; ucSharePowerSource from the table.
 * Return: 0 on success, -1 when the SMU_Info table is missing.
 */
int atomctrl_get_vddc_shared_railinfo(struct pp_hwmgr *hwmgr, uint8_t *shared_rail)
{
	ATOM_SMU_INFO_V2_1 *psmu_info;

	psmu_info = (ATOM_SMU_INFO_V2_1 *)smu_atom_get_data_table(hwmgr->adev,
			GetIndexIntoMasterTable(DATA, SMU_Info),
			NULL, NULL, NULL);
	if (psmu_info == NULL)
		return -1;

	*shared_rail = psmu_info->ucSharePowerSource;

	return 0;
}
1025
/*
 * Copy the AVFS fuse/characterization parameters out of the
 * ASIC_ProfilingInfo V3.6 BIOS table, converting multi-byte fields from
 * little-endian to CPU order.
 *
 * @hwmgr: hwmgr instance
 * @param: output parameter block; filled field-for-field from the table
 *
 * Return: 0 on success, -EINVAL when @param is NULL, -1 when the
 * ASIC_ProfilingInfo table is missing.
 */
int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
				  struct pp_atom_ctrl__avfs_parameters *param)
{
	ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL;

	if (param == NULL)
		return -EINVAL;

	profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *)
			smu_atom_get_data_table(hwmgr->adev,
					GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
					NULL, NULL, NULL);
	if (!profile)
		return -1;

	/* Mean/sigma constants for the AVFS voltage model. */
	param->ulAVFS_meanNsigma_Acontant0 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant0);
	param->ulAVFS_meanNsigma_Acontant1 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant1);
	param->ulAVFS_meanNsigma_Acontant2 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant2);
	param->usAVFS_meanNsigma_DC_tol_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_DC_tol_sigma);
	param->usAVFS_meanNsigma_Platform_mean = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_mean);
	param->usAVFS_meanNsigma_Platform_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_sigma);
	/* VDROOP polynomial coefficients, CKS off and on variants. */
	param->ulGB_VDROOP_TABLE_CKSOFF_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a0);
	param->ulGB_VDROOP_TABLE_CKSOFF_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a1);
	param->ulGB_VDROOP_TABLE_CKSOFF_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a2);
	param->ulGB_VDROOP_TABLE_CKSON_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a0);
	param->ulGB_VDROOP_TABLE_CKSON_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a1);
	param->ulGB_VDROOP_TABLE_CKSON_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a2);
	/* AVFS fuse-table line fit (m1/m2/b), CKS off and on variants. */
	param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
	param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2);
	param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b);
	param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_m1);
	param->usAVFSGB_FUSE_TABLE_CKSON_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSON_m2);
	param->ulAVFSGB_FUSE_TABLE_CKSON_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_b);
	param->usMaxVoltage_0_25mv = le16_to_cpu(profile->usMaxVoltage_0_25mv);
	/* Single-byte enable flags need no byte swapping. */
	param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF;
	param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON;
	param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF;
	param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON;
	param->usPSM_Age_ComFactor = le16_to_cpu(profile->usPSM_Age_ComFactor);
	param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage;

	return 0;
}
1069
/*
 * Extract the SVI2 regulator GPIO ids and load line setting for the given
 * voltage type from the BIOS voltage object table.
 *
 * @svd_gpio_id: output; SVD GPIO id
 * @svc_gpio_id: output; SVC GPIO id
 * @load_line:   output; load line / PSI setting (CPU byte order)
 *
 * Return: 0 on success, -EINVAL when the voltage table or the SVID2
 * voltage object for @voltage_type is missing.
 */
int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
				uint8_t *svd_gpio_id, uint8_t *svc_gpio_id,
				uint16_t *load_line)
{
	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);

	const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;

	PP_ASSERT_WITH_CODE((NULL != voltage_info),
			"Could not find Voltage Table in BIOS.", return -EINVAL);

	voltage_object = atomctrl_lookup_voltage_type_v3
		(voltage_info, voltage_type, VOLTAGE_OBJ_SVID2);

	/* The lookup can fail for voltage types the BIOS does not describe;
	 * dereferencing the result unchecked would oops. */
	if (voltage_object == NULL)
		return -EINVAL;

	*svd_gpio_id = voltage_object->asSVID2Obj.ucSVDGpioId;
	*svc_gpio_id = voltage_object->asSVID2Obj.ucSVCGpioId;
	/* BIOS stores this field little-endian; convert like the other
	 * 16-bit table fields in this file. */
	*load_line = le16_to_cpu(voltage_object->asSVID2Obj.usLoadLine_PSI);

	return 0;
}
1091
/*
 * Ask the SetVoltage ATOM command table (in ATOM_GET_LEAKAGE_ID mode) for
 * the leakage/virtual voltage id fused into this part.
 *
 * @virtual_voltage_id: output; leakage id in CPU byte order.
 * Return: result of amdgpu_atom_execute_table() (0 on success).
 */
int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id)
{
	struct amdgpu_device *adev = hwmgr->adev;
	SET_VOLTAGE_PS_ALLOCATION allocation;
	SET_VOLTAGE_PARAMETERS_V1_3 *voltage_parameters =
			(SET_VOLTAGE_PARAMETERS_V1_3 *)&allocation.sASICSetVoltage;
	int result;

	voltage_parameters->ucVoltageMode = ATOM_GET_LEAKAGE_ID;

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, SetVoltage),
			(uint32_t *)voltage_parameters, sizeof(*voltage_parameters));

	/* usVoltageLevel is written by the ATOM interpreter in little-endian
	 * order, like the 16-bit level passed to this table elsewhere in
	 * this file; convert so big-endian hosts see the right id. */
	*virtual_voltage_id = le16_to_cpu(voltage_parameters->usVoltageLevel);

	return result;
}
1110
/*
 * Translate a (virtual voltage id, efuse leakage id) pair into real VDDC
 * and VDDCI levels using the ELB arrays of the ASIC_ProfilingInfo V2.1
 * BIOS table.
 *
 * @vddc:  output; matched VDDC level, 0 when no match was found
 * @vddci: output; matched VDDCI level, 0 when no match was found
 * @virtual_voltage_id: leakage id to look up in the Id arrays
 * @efuse_voltage_id:   fused leakage value, binned via the LeakageBin array
 *
 * Return: 0 (even when nothing matched), -EINVAL when the table is missing.
 *
 * NOTE(review): the us*Offset fields and the u16 array contents are used
 * without le16_to_cpu, unlike other table reads in this file — presumably
 * only exercised on little-endian hosts; confirm before relying on BE.
 */
int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
					uint16_t *vddc, uint16_t *vddci,
					uint16_t virtual_voltage_id,
					uint16_t efuse_voltage_id)
{
	int i, j;
	int ix;
	u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
	ATOM_ASIC_PROFILING_INFO_V2_1 *profile;

	*vddc = 0;
	*vddci = 0;

	ix = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);

	profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
			smu_atom_get_data_table(hwmgr->adev,
					ix,
					NULL, NULL, NULL);
	if (!profile)
		return -EINVAL;

	/* Only rev >= 2.1 tables of at least the expected size carry the
	 * ELB leakage arrays. */
	if ((profile->asHeader.ucTableFormatRevision >= 2) &&
		(profile->asHeader.ucTableContentRevision >= 1) &&
		(profile->asHeader.usStructureSize >= sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))) {
		/* The arrays are stored at byte offsets from the table base. */
		leakage_bin = (u16 *)((char *)profile + profile->usLeakageBinArrayOffset);
		vddc_id_buf = (u16 *)((char *)profile + profile->usElbVDDC_IdArrayOffset);
		vddc_buf = (u16 *)((char *)profile + profile->usElbVDDC_LevelArrayOffset);
		if (profile->ucElbVDDC_Num > 0) {
			/* Find the column for this virtual voltage id, then the
			 * first leakage bin that covers the fused value; the
			 * level array is laid out bin-major. */
			for (i = 0; i < profile->ucElbVDDC_Num; i++) {
				if (vddc_id_buf[i] == virtual_voltage_id) {
					for (j = 0; j < profile->ucLeakageBinNum; j++) {
						if (efuse_voltage_id <= leakage_bin[j]) {
							*vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
							break;
						}
					}
					break;
				}
			}
		}

		/* Same lookup again for VDDCI. */
		vddci_id_buf = (u16 *)((char *)profile + profile->usElbVDDCI_IdArrayOffset);
		vddci_buf   = (u16 *)((char *)profile + profile->usElbVDDCI_LevelArrayOffset);
		if (profile->ucElbVDDCI_Num > 0) {
			for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
				if (vddci_id_buf[i] == virtual_voltage_id) {
					for (j = 0; j < profile->ucLeakageBinNum; j++) {
						if (efuse_voltage_id <= leakage_bin[j]) {
							*vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
							break;
						}
					}
					break;
				}
			}
		}
	}

	return 0;
}
1172
/*
 * Read the chip's min/max VDDC limits from the ASIC_ProfilingInfo table.
 * Both outputs are set to 0 when the table is absent or the chip is not
 * handled.  Tonga/Fiji tables store the values in 0.25 mV units (divide by
 * 4); Polaris tables store them in 0.01 mV units (divide by 100).
 */
void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
				uint32_t *min_vddc)
{
	void *profile = smu_atom_get_data_table(hwmgr->adev,
			GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
			NULL, NULL, NULL);

	if (!profile)
		goto not_found;

	switch (hwmgr->chip_id) {
	case CHIP_TONGA:
	case CHIP_FIJI:
	{
		ATOM_ASIC_PROFILING_INFO_V3_3 *v3_3 = profile;

		*max_vddc = le32_to_cpu(v3_3->ulMaxVddc) / 4;
		*min_vddc = le32_to_cpu(v3_3->ulMinVddc) / 4;
		return;
	}
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	{
		ATOM_ASIC_PROFILING_INFO_V3_6 *v3_6 = profile;

		*max_vddc = le32_to_cpu(v3_6->ulMaxVddc) / 100;
		*min_vddc = le32_to_cpu(v3_6->ulMinVddc) / 100;
		return;
	}
	default:
		break;
	}

not_found:
	*max_vddc = 0;
	*min_vddc = 0;
}
1202
/*
 * Copy the EDC/DIDT hi-lo leakage threshold and table offsets out of the
 * GFX_Info BIOS table.
 *
 * Return: 0 on success, -ENOENT when the GFX_Info table is missing.
 */
int atomctrl_get_edc_hilo_leakage_offset_table(struct pp_hwmgr *hwmgr,
					       AtomCtrl_HiLoLeakageOffsetTable *table)
{
	ATOM_GFX_INFO_V2_3 *gfx_info;

	gfx_info = smu_atom_get_data_table(hwmgr->adev,
			GetIndexIntoMasterTable(DATA, GFX_Info),
			NULL, NULL, NULL);
	if (gfx_info == NULL)
		return -ENOENT;

	/* Fields are copied raw, in the table's own byte order. */
	table->usHiLoLeakageThreshold    = gfx_info->usHiLoLeakageThreshold;
	table->usEdcDidtLoDpm7TableOffset = gfx_info->usEdcDidtLoDpm7TableOffset;
	table->usEdcDidtHiDpm7TableOffset = gfx_info->usEdcDidtHiDpm7TableOffset;

	return 0;
}
1218
get_edc_leakage_table(struct pp_hwmgr * hwmgr,uint16_t offset)1219 static AtomCtrl_EDCLeakgeTable *get_edc_leakage_table(struct pp_hwmgr *hwmgr,
1220 uint16_t offset)
1221 {
1222 void *table_address;
1223 char *temp;
1224
1225 table_address = smu_atom_get_data_table(hwmgr->adev,
1226 GetIndexIntoMasterTable(DATA, GFX_Info),
1227 NULL, NULL, NULL);
1228 if (!table_address)
1229 return NULL;
1230
1231 temp = (char *)table_address;
1232 table_address += offset;
1233
1234 return (AtomCtrl_EDCLeakgeTable *)temp;
1235 }
1236
/*
 * Copy the DIDT register values of the EDC leakage table found at @offset
 * inside GFX_Info into the caller's table.
 *
 * Return: 0 on success, -ENOENT when the BIOS table cannot be located.
 */
int atomctrl_get_edc_leakage_table(struct pp_hwmgr *hwmgr,
				   AtomCtrl_EDCLeakgeTable *table,
				   uint16_t offset)
{
	uint32_t idx;
	const uint32_t count = sizeof(table->DIDT_REG) /
			       sizeof(table->DIDT_REG[0]);
	AtomCtrl_EDCLeakgeTable *src = get_edc_leakage_table(hwmgr, offset);

	if (src == NULL)
		return -ENOENT;

	for (idx = 0; idx < count; idx++)
		table->DIDT_REG[idx] = src->DIDT_REG[idx];

	return 0;
}
1255