1 /*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/types.h>
27 #include <linux/pci.h>
28
29 #include "smumgr.h"
30 #include "pp_debug.h"
31 #include "ci_smumgr.h"
32 #include "ppsmc.h"
33 #include "smu7_hwmgr.h"
34 #include "hardwaremanager.h"
35 #include "ppatomctrl.h"
36 #include "cgs_common.h"
37 #include "atombios.h"
38 #include "pppcielanes.h"
39 #include "smu7_smumgr.h"
40
41 #include "smu/smu_7_0_1_d.h"
42 #include "smu/smu_7_0_1_sh_mask.h"
43
44 #include "dce/dce_8_0_d.h"
45 #include "dce/dce_8_0_sh_mask.h"
46
47 #include "bif/bif_4_1_d.h"
48 #include "bif/bif_4_1_sh_mask.h"
49
50 #include "gca/gfx_7_2_d.h"
51 #include "gca/gfx_7_2_sh_mask.h"
52
53 #include "gmc/gmc_7_1_d.h"
54 #include "gmc/gmc_7_1_sh_mask.h"
55
56 #include "processpptables.h"
57
58 #define MC_CG_ARB_FREQ_F0 0x0a
59 #define MC_CG_ARB_FREQ_F1 0x0b
60 #define MC_CG_ARB_FREQ_F2 0x0c
61 #define MC_CG_ARB_FREQ_F3 0x0d
62
63 #define SMC_RAM_END 0x40000
64
65 #define CISLAND_MINIMUM_ENGINE_CLOCK 800
66 #define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
67
68 static const struct ci_pt_defaults defaults_hawaii_xt = {
69 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
70 { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
71 { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
72 };
73
74 static const struct ci_pt_defaults defaults_hawaii_pro = {
75 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
76 { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
77 { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
78 };
79
80 static const struct ci_pt_defaults defaults_bonaire_xt = {
81 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
82 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
83 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
84 };
85
86
87 static const struct ci_pt_defaults defaults_saturn_xt = {
88 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
89 { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
90 { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
91 };
92
93
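/*
 * Latch a dword-aligned SMC SRAM address into the indirect-access index
 * register with auto-increment disabled, so the following SMC_IND_DATA_0
 * access targets exactly this address. Unaligned addresses and addresses
 * running past 'limit' are rejected.
 */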
94 static int ci_set_smc_sram_address(struct pp_hwmgr *hwmgr,
95 uint32_t smc_addr, uint32_t limit)
96 {
97 if ((0 != (3 & smc_addr))
98 || ((smc_addr + 3) >= limit)) {
99 pr_err("smc_addr invalid \n");
100 return -EINVAL;
101 }
102
103 cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, smc_addr);
104 PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
105 return 0;
106 }
107
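/*
 * Copy a byte buffer into SMC SRAM through the indirect data port.
 * Full dwords are packed MSB-first; a trailing partial dword is merged
 * with the existing SRAM contents via a read-modify-write so the bytes
 * beyond the buffer are preserved.
 */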
108 static int ci_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
109 const uint8_t *src, uint32_t byte_count, uint32_t limit)
110 {
111 int result;
112 uint32_t data = 0;
113 uint32_t original_data;
114 uint32_t addr = 0;
115 uint32_t extra_shift;
116
117 if ((3 & smc_start_address)
118 || ((smc_start_address + byte_count) >= limit)) {
119 pr_err("smc_start_address invalid \n");
120 return -EINVAL;
121 }
122
123 addr = smc_start_address;
124
125 while (byte_count >= 4) {
126 /* Bytes are written into the SMC address space with the MSB first. */
127 data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
128
129 result = ci_set_smc_sram_address(hwmgr, addr, limit);
130
131 if (0 != result)
132 return result;
133
134 cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
135
136 src += 4;
137 byte_count -= 4;
138 addr += 4;
139 }
140
141 if (0 != byte_count) {
142
143 data = 0;
144
145 result = ci_set_smc_sram_address(hwmgr, addr, limit);
146
147 if (0 != result)
148 return result;
149
150
151 original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
152
153 extra_shift = 8 * (4 - byte_count);
154
155 while (byte_count > 0) {
156 /* Bytes are written into the SMC address space with the MSB first. */
157 data = (0x100 * data) + *src++;
158 byte_count--;
159 }
160
161 data <<= extra_shift;
162
163 data |= (original_data & ~((~0UL) << extra_shift));
164
165 result = ci_set_smc_sram_address(hwmgr, addr, limit);
166
167 if (0 != result)
168 return result;
169
170 cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
171 }
172
173 return 0;
174 }
175
176
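/*
 * Write a small jump stub at SMC address 0 so the SMC branches into the
 * uploaded firmware when it starts executing.
 */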
177 static int ci_program_jump_on_start(struct pp_hwmgr *hwmgr)
178 {
179 static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
180
181 ci_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);
182
183 return 0;
184 }
185
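/*
 * The SMC is considered running when its clock is not gated
 * (ck_disable == 0) and its program counter has advanced past the
 * boot stub (ixSMC_PC_C >= 0x20100).
 */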
186 static bool ci_is_smc_ram_running(struct pp_hwmgr *hwmgr)
187 {
188 return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
189 CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
190 && (0x20100 <= cgs_read_ind_register(hwmgr->device,
191 CGS_IND_REG__SMC, ixSMC_PC_C)));
192 }
193
194 static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
195 uint32_t *value, uint32_t limit)
196 {
197 int result;
198
199 result = ci_set_smc_sram_address(hwmgr, smc_addr, limit);
200
201 if (result)
202 return result;
203
204 *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
205 return 0;
206 }
207
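/*
 * Hand a message to the SMC: clear the response register, write the
 * message ID, then poll SMC_RESP_0 until the SMC acknowledges. A response
 * value other than 1 is logged but not treated as fatal.
 */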
208 static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
209 {
210 struct amdgpu_device *adev = hwmgr->adev;
211 int ret;
212
213 cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
214 cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
215
216 PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
217
218 ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
219
220 if (ret != 1)
221 dev_info(adev->dev,
222 "failed to send message %x ret is %d\n", msg, ret);
223
224 return 0;
225 }
226
227 static int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
228 uint16_t msg, uint32_t parameter)
229 {
230 cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
231 return ci_send_msg_to_smc(hwmgr, msg);
232 }
233
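/*
 * Select the per-ASIC power-tune defaults (BAPM/TDC coefficients) based on
 * the PCI device ID: Hawaii PRO, Hawaii XT, Saturn, or Bonaire XT as the
 * fallback.
 */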
234 static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
235 {
236 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
237 struct amdgpu_device *adev = hwmgr->adev;
238 uint32_t dev_id;
239
240 dev_id = adev->pdev->device;
241
242 switch (dev_id) {
243 case 0x67BA:
244 case 0x67B1:
245 smu_data->power_tune_defaults = &defaults_hawaii_pro;
246 break;
247 case 0x67B8:
248 case 0x67B0:
249 smu_data->power_tune_defaults = &defaults_hawaii_xt;
250 break;
251 case 0x6640:
252 case 0x6641:
253 case 0x6646:
254 case 0x6647:
255 smu_data->power_tune_defaults = &defaults_saturn_xt;
256 break;
257 case 0x6649:
258 case 0x6650:
259 case 0x6651:
260 case 0x6658:
261 case 0x665C:
262 case 0x665D:
263 case 0x67A0:
264 case 0x67A1:
265 case 0x67A2:
266 case 0x67A8:
267 case 0x67A9:
268 case 0x67AA:
269 case 0x67B9:
270 case 0x67BE:
271 default:
272 smu_data->power_tune_defaults = &defaults_bonaire_xt;
273 break;
274 }
275 }
276
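/*
 * Look up the voltage required for a given clock: return the voltage of the
 * first dependency-table entry whose clock is at or above the requested
 * clock, or the voltage of the highest entry if the clock exceeds the table.
 */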
277 static int ci_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
278 struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
279 uint32_t clock, uint32_t *vol)
280 {
281 uint32_t i = 0;
282
283 if (allowed_clock_voltage_table->count == 0)
284 return -EINVAL;
285
286 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
287 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
288 *vol = allowed_clock_voltage_table->entries[i].v;
289 return 0;
290 }
291 }
292
293 *vol = allowed_clock_voltage_table->entries[i - 1].v;
294 return 0;
295 }
296
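/*
 * Derive the SPLL register values (reference/post/feedback dividers and,
 * when enabled, the spread-spectrum CLKS/CLKV fields) for a target engine
 * clock and store them in the SMC graphics-level entry.
 */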
297 static int ci_calculate_sclk_params(struct pp_hwmgr *hwmgr,
298 uint32_t clock, struct SMU7_Discrete_GraphicsLevel *sclk)
299 {
300 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
301 struct pp_atomctrl_clock_dividers_vi dividers;
302 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
303 uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
304 uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
305 uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
306 uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
307 uint32_t ref_clock;
308 uint32_t ref_divider;
309 uint32_t fbdiv;
310 int result;
311
312 /* get the engine clock dividers for this clock value */
313 result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
314
315 PP_ASSERT_WITH_CODE(result == 0,
316 "Error retrieving Engine Clock dividers from VBIOS.",
317 return result);
318
319 /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
320 ref_clock = atomctrl_get_reference_clock(hwmgr);
321 ref_divider = 1 + dividers.uc_pll_ref_div;
322
323 /* low 14 bits is fraction and high 12 bits is divider */
324 fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
325
326 /* SPLL_FUNC_CNTL setup */
327 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
328 SPLL_REF_DIV, dividers.uc_pll_ref_div);
329 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
330 SPLL_PDIV_A, dividers.uc_pll_post_div);
331
332 /* SPLL_FUNC_CNTL_3 setup*/
333 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
334 SPLL_FB_DIV, fbdiv);
335
336 /* set to use fractional accumulation*/
337 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
338 SPLL_DITHEN, 1);
339
340 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
341 PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
342 struct pp_atomctrl_internal_ss_info ss_info;
343 uint32_t vco_freq = clock * dividers.uc_pll_post_div;
344
345 if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
346 vco_freq, &ss_info)) {
347 uint32_t clk_s = ref_clock * 5 /
348 (ref_divider * ss_info.speed_spectrum_rate);
349 uint32_t clk_v = 4 * ss_info.speed_spectrum_percentage *
350 fbdiv / (clk_s * 10000);
351
352 cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
353 CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
354 cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
355 CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
356 cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
357 CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
358 }
359 }
360
361 sclk->SclkFrequency = clock;
362 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
363 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
364 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
365 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
366 sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
367
368 return 0;
369 }
370
371 static void ci_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
372 const struct phm_phase_shedding_limits_table *pl,
373 uint32_t sclk, uint32_t *p_shed)
374 {
375 unsigned int i;
376
377 /* use the minimum phase shedding */
378 *p_shed = 1;
379
380 for (i = 0; i < pl->count; i++) {
381 if (sclk < pl->entries[i].Sclk) {
382 *p_shed = i;
383 break;
384 }
385 }
386 }
387
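/*
 * Return the largest deep-sleep divider ID (power-of-two shift) that still
 * keeps clock >> id at or above the minimum engine clock.
 */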
388 static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock,
389 uint32_t clock_insr)
390 {
391 uint8_t i;
392 uint32_t temp;
393 uint32_t min = min_t(uint32_t, clock_insr, CISLAND_MINIMUM_ENGINE_CLOCK);
394
395 if (clock < min) {
396 pr_info("Engine clock can't satisfy stutter requirement!\n");
397 return 0;
398 }
399 for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
400 temp = clock >> i;
401
402 if (temp >= min || i == 0)
403 break;
404 }
405 return i;
406 }
407
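/*
 * Fill one SMC graphics DPM level: SPLL parameters for the clock, minimum
 * VDDC from the SCLK/VDDC dependency table, phase shedding, activity and
 * hysteresis settings, and the deep-sleep divider, then convert the fields
 * to SMC byte order.
 */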
408 static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
409 uint32_t clock, struct SMU7_Discrete_GraphicsLevel *level)
410 {
411 int result;
412 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
413
414
415 result = ci_calculate_sclk_params(hwmgr, clock, level);
416
417 /* populate graphics levels */
418 result = ci_get_dependency_volt_by_clk(hwmgr,
419 hwmgr->dyn_state.vddc_dependency_on_sclk, clock,
420 (uint32_t *)(&level->MinVddc));
421 if (result) {
422 pr_err("vdd_dep_on_sclk table is NULL\n");
423 return result;
424 }
425
426 level->SclkFrequency = clock;
427 level->MinVddcPhases = 1;
428
429 if (data->vddc_phase_shed_control)
430 ci_populate_phase_value_based_on_sclk(hwmgr,
431 hwmgr->dyn_state.vddc_phase_shed_limits_table,
432 clock,
433 &level->MinVddcPhases);
434
435 level->ActivityLevel = data->current_profile_setting.sclk_activity;
436 level->CcPwrDynRm = 0;
437 level->CcPwrDynRm1 = 0;
438 level->EnabledForActivity = 0;
439 /* this level can be used for throttling.*/
440 level->EnabledForThrottle = 1;
441 level->UpH = data->current_profile_setting.sclk_up_hyst;
442 level->DownH = data->current_profile_setting.sclk_down_hyst;
443 level->VoltageDownH = 0;
444 level->PowerThrottle = 0;
445
446
447 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
448 PHM_PlatformCaps_SclkDeepSleep))
449 level->DeepSleepDivId =
450 ci_get_sleep_divider_id_from_clock(clock,
451 CISLAND_MINIMUM_ENGINE_CLOCK);
452
453 /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
454 level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
455
456 if (0 == result) {
457 level->MinVddc = PP_HOST_TO_SMC_UL(level->MinVddc * VOLTAGE_SCALE);
458 CONVERT_FROM_HOST_TO_SMC_UL(level->MinVddcPhases);
459 CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
460 CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
461 CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
462 CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
463 CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
464 CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
465 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
466 CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
467 }
468
469 return result;
470 }
471
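/*
 * Build the full graphics DPM level array from the SCLK DPM table, mark the
 * highest level with the high display watermark, and upload the array to
 * SMC SRAM.
 */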
472 static int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
473 {
474 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
475 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
476 struct smu7_dpm_table *dpm_table = &data->dpm_table;
477 int result = 0;
478 uint32_t array = smu_data->dpm_table_start +
479 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
480 uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
481 SMU7_MAX_LEVELS_GRAPHICS;
482 struct SMU7_Discrete_GraphicsLevel *levels =
483 smu_data->smc_state_table.GraphicsLevel;
484 uint32_t i;
485
486 for (i = 0; i < dpm_table->sclk_table.count; i++) {
487 result = ci_populate_single_graphic_level(hwmgr,
488 dpm_table->sclk_table.dpm_levels[i].value,
489 &levels[i]);
490 if (result)
491 return result;
492 if (i > 1)
493 smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
494 if (i == (dpm_table->sclk_table.count - 1))
495 smu_data->smc_state_table.GraphicsLevel[i].DisplayWatermark =
496 PPSMC_DISPLAY_WATERMARK_HIGH;
497 }
498
499 smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
500
501 smu_data->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
502 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
503 phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
504
505 result = ci_copy_bytes_to_smc(hwmgr, array,
506 (u8 *)levels, array_size,
507 SMC_RAM_END);
508
509 return result;
510
511 }
512
513 static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr)
514 {
515 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
516 const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
517
518 smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
519 smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
520 smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
521 smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
522
523 return 0;
524 }
525
526 static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr)
527 {
528 uint16_t tdc_limit;
529 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
530 const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
531
532 tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
533 smu_data->power_tune_table.TDC_VDDC_PkgLimit =
534 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
535 smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
536 defaults->tdc_vddc_throttle_release_limit_perc;
537 smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
538
539 return 0;
540 }
541
542 static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
543 {
544 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
545 const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
546
547 if (ci_read_smc_sram_dword(hwmgr,
548 fuse_table_offset +
549 offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
550 (uint32_t *)&smu_data->power_tune_table.TdcWaterfallCtl, SMC_RAM_END))
551 PP_ASSERT_WITH_CODE(false,
552 "Attempt to read PmFuses.DW8 (TdcWaterfallCtl) from SMC Failed!",
553 return -EINVAL);
554 else
555 smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
556
557 return 0;
558 }
559
560 static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
561 {
562 uint16_t tmp;
563 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
564
565 if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
566 || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
567 tmp = hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity;
568 else
569 tmp = hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
570
571 smu_data->power_tune_table.FuzzyFan_PwmSetDelta = CONVERT_FROM_HOST_TO_SMC_US(tmp);
572
573 return 0;
574 }
575
576 static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
577 {
578 int i;
579 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
580 uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
581 uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
582 uint8_t *hi2_vid = smu_data->power_tune_table.BapmVddCVidHiSidd2;
583
584 PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
585 "The CAC Leakage table does not exist!", return -EINVAL);
586 PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
587 "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
588 PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
589 "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
590
591 for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
592 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
593 lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
594 hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
595 hi2_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc3);
596 } else {
597 lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc);
598 hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Leakage);
599 }
600 }
601
602 return 0;
603 }
604
605 static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr)
606 {
607 int i;
608 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
609 uint8_t *vid = smu_data->power_tune_table.VddCVid;
610 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
611
612 PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
613 "There should never be more than 8 entries for VddcVid!!!",
614 return -EINVAL);
615
616 for (i = 0; i < (int)data->vddc_voltage_table.count; i++)
617 vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
618
619 return 0;
620 }
621
622 static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr)
623 {
624 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
625 u8 *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
626 u8 *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
627 int i, min, max;
628
629 min = max = hi_vid[0];
630 for (i = 0; i < 8; i++) {
631 if (0 != hi_vid[i]) {
632 if (min > hi_vid[i])
633 min = hi_vid[i];
634 if (max < hi_vid[i])
635 max = hi_vid[i];
636 }
637
638 if (0 != lo_vid[i]) {
639 if (min > lo_vid[i])
640 min = lo_vid[i];
641 if (max < lo_vid[i])
642 max = lo_vid[i];
643 }
644 }
645
646 if ((min == 0) || (max == 0))
647 return -EINVAL;
648 smu_data->power_tune_table.GnbLPMLMaxVid = (u8)max;
649 smu_data->power_tune_table.GnbLPMLMinVid = (u8)min;
650
651 return 0;
652 }
653
654 static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
655 {
656 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
657 uint16_t HiSidd;
658 uint16_t LoSidd;
659 struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
660
661 HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
662 LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
663
664 smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
665 CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
666 smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
667 CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
668
669 return 0;
670 }
671
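/*
 * When power containment is enabled, assemble the PmFuses table (VID lookup
 * tables, SVI load line, TDC limit, fan sensitivity, min/max GNB PML VIDs,
 * base leakage) and write it to the SMC at the offset advertised in the
 * firmware header.
 */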
672 static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr)
673 {
674 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
675 uint32_t pm_fuse_table_offset;
676 int ret = 0;
677
678 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
679 PHM_PlatformCaps_PowerContainment)) {
680 if (ci_read_smc_sram_dword(hwmgr,
681 SMU7_FIRMWARE_HEADER_LOCATION +
682 offsetof(SMU7_Firmware_Header, PmFuseTable),
683 &pm_fuse_table_offset, SMC_RAM_END)) {
684 pr_err("Attempt to get pm_fuse_table_offset Failed!\n");
685 return -EINVAL;
686 }
687
688 /* DW0 - DW3 */
689 ret = ci_populate_bapm_vddc_vid_sidd(hwmgr);
690 /* DW4 - DW5 */
691 ret |= ci_populate_vddc_vid(hwmgr);
692 /* DW6 */
693 ret |= ci_populate_svi_load_line(hwmgr);
694 /* DW7 */
695 ret |= ci_populate_tdc_limit(hwmgr);
696 /* DW8 */
697 ret |= ci_populate_dw8(hwmgr, pm_fuse_table_offset);
698
699 ret |= ci_populate_fuzzy_fan(hwmgr, pm_fuse_table_offset);
700
701 ret |= ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(hwmgr);
702
703 ret |= ci_populate_bapm_vddc_base_leakage_sidd(hwmgr);
704 if (ret)
705 return ret;
706
707 ret = ci_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
708 (uint8_t *)&smu_data->power_tune_table,
709 sizeof(struct SMU7_Discrete_PmFuses), SMC_RAM_END);
710 }
711 return ret;
712 }
713
714 static int ci_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
715 {
716 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
717 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
718 const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
719 SMU7_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
720 struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
721 struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
722 const uint16_t *def1, *def2;
723 int i, j, k;
724
725 dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
726 dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
727
728 dpm_table->DTETjOffset = 0;
729 dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
730 dpm_table->GpuTjHyst = 8;
731
732 dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
733
734 if (ppm) {
735 dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
736 dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
737 } else {
738 dpm_table->PPM_PkgPwrLimit = 0;
739 dpm_table->PPM_TemperatureLimit = 0;
740 }
741
742 CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
743 CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
744
745 dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
746 def1 = defaults->bapmti_r;
747 def2 = defaults->bapmti_rc;
748
749 for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
750 for (j = 0; j < SMU7_DTE_SOURCES; j++) {
751 for (k = 0; k < SMU7_DTE_SINKS; k++) {
752 dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
753 dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
754 def1++;
755 def2++;
756 }
757 }
758 }
759
760 return 0;
761 }
762
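/*
 * Translate a VDDC value into the "standard" high/low SIDD voltages using
 * the CAC leakage table, indexed through the SCLK/VDDC dependency table;
 * fall back to the raw VDDC when there is no match or no leakage table.
 */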
763 static int ci_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
764 pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
765 uint16_t *lo)
766 {
767 uint16_t v_index;
768 bool vol_found = false;
769 *hi = tab->value * VOLTAGE_SCALE;
770 *lo = tab->value * VOLTAGE_SCALE;
771
772 PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
773 "The SCLK/VDDC Dependency Table does not exist.\n",
774 return -EINVAL);
775
776 if (NULL == hwmgr->dyn_state.cac_leakage_table) {
777 pr_warn("CAC Leakage Table does not exist, using vddc.\n");
778 return 0;
779 }
780
781 for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
782 if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
783 vol_found = true;
784 if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
785 *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
786 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
787 } else {
788 pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
789 *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
790 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
791 }
792 break;
793 }
794 }
795
796 if (!vol_found) {
797 for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
798 if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
799 vol_found = true;
800 if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
801 *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
802 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
803 } else {
804 pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
805 *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
806 *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
807 }
808 break;
809 }
810 }
811
812 if (!vol_found)
813 pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
814 }
815
816 return 0;
817 }
818
819 static int ci_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
820 pp_atomctrl_voltage_table_entry *tab,
821 SMU7_Discrete_VoltageLevel *smc_voltage_tab)
822 {
823 int result;
824
825 result = ci_get_std_voltage_value_sidd(hwmgr, tab,
826 &smc_voltage_tab->StdVoltageHiSidd,
827 &smc_voltage_tab->StdVoltageLoSidd);
828 if (result) {
829 smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
830 smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
831 }
832
833 smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
834 CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
835 CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);
836
837 return 0;
838 }
839
840 static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
841 SMU7_Discrete_DpmTable *table)
842 {
843 unsigned int count;
844 int result;
845 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
846
847 table->VddcLevelCount = data->vddc_voltage_table.count;
848 for (count = 0; count < table->VddcLevelCount; count++) {
849 result = ci_populate_smc_voltage_table(hwmgr,
850 &(data->vddc_voltage_table.entries[count]),
851 &(table->VddcLevel[count]));
852 PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);
853
854 /* GPIO voltage control */
855 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
856 table->VddcLevel[count].Smio = (uint8_t) count;
857 table->Smio[count] |= data->vddc_voltage_table.entries[count].smio_low;
858 table->SmioMaskVddcVid |= data->vddc_voltage_table.entries[count].smio_low;
859 } else {
860 table->VddcLevel[count].Smio = 0;
861 }
862 }
863
864 CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
865
866 return 0;
867 }
868
869 static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
870 SMU7_Discrete_DpmTable *table)
871 {
872 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
873 uint32_t count;
874 int result;
875
876 table->VddciLevelCount = data->vddci_voltage_table.count;
877
878 for (count = 0; count < table->VddciLevelCount; count++) {
879 result = ci_populate_smc_voltage_table(hwmgr,
880 &(data->vddci_voltage_table.entries[count]),
881 &(table->VddciLevel[count]));
882 PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
883 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
884 table->VddciLevel[count].Smio = (uint8_t) count;
885 table->Smio[count] |= data->vddci_voltage_table.entries[count].smio_low;
886 table->SmioMaskVddciVid |= data->vddci_voltage_table.entries[count].smio_low;
887 } else {
888 table->VddciLevel[count].Smio = 0;
889 }
890 }
891
892 CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
893
894 return 0;
895 }
896
897 static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
898 SMU7_Discrete_DpmTable *table)
899 {
900 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
901 uint32_t count;
902 int result;
903
904 table->MvddLevelCount = data->mvdd_voltage_table.count;
905
906 for (count = 0; count < table->MvddLevelCount; count++) {
907 result = ci_populate_smc_voltage_table(hwmgr,
908 &(data->mvdd_voltage_table.entries[count]),
909 &table->MvddLevel[count]);
910 PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
911 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
912 table->MvddLevel[count].Smio = (uint8_t) count;
913 table->Smio[count] |= data->mvdd_voltage_table.entries[count].smio_low;
914 table->SmioMaskMvddVid |= data->mvdd_voltage_table.entries[count].smio_low;
915 } else {
916 table->MvddLevel[count].Smio = 0;
917 }
918 }
919
920 CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
921
922 return 0;
923 }
924
925
926 static int ci_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
927 SMU7_Discrete_DpmTable *table)
928 {
929 int result;
930
931 result = ci_populate_smc_vddc_table(hwmgr, table);
932 PP_ASSERT_WITH_CODE(0 == result,
933 "can not populate VDDC voltage table to SMC", return -EINVAL);
934
935 result = ci_populate_smc_vdd_ci_table(hwmgr, table);
936 PP_ASSERT_WITH_CODE(0 == result,
937 "can not populate VDDCI voltage table to SMC", return -EINVAL);
938
939 result = ci_populate_smc_mvdd_table(hwmgr, table);
940 PP_ASSERT_WITH_CODE(0 == result,
941 "can not populate MVDD voltage table to SMC", return -EINVAL);
942
943 return 0;
944 }
945
946 static int ci_populate_ulv_level(struct pp_hwmgr *hwmgr,
947 struct SMU7_Discrete_Ulv *state)
948 {
949 uint32_t voltage_response_time, ulv_voltage;
950 int result;
951 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
952
953 state->CcPwrDynRm = 0;
954 state->CcPwrDynRm1 = 0;
955
956 result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
957 PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);
958
959 if (ulv_voltage == 0) {
960 data->ulv_supported = false;
961 return 0;
962 }
963
964 if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
965 /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
966 if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
967 state->VddcOffset = 0;
968 else
969 /* used in SMIO Mode. not implemented for now. this is backup only for CI. */
970 state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
971 } else {
972 /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
973 if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
974 state->VddcOffsetVid = 0;
975 else /* used in SVI2 Mode */
976 state->VddcOffsetVid = (uint8_t)(
977 (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
978 * VOLTAGE_VID_OFFSET_SCALE2
979 / VOLTAGE_VID_OFFSET_SCALE1);
980 }
981 state->VddcPhase = 1;
982
983 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
984 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
985 CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
986
987 return 0;
988 }
989
990 static int ci_populate_ulv_state(struct pp_hwmgr *hwmgr,
991 SMU7_Discrete_Ulv *ulv_level)
992 {
993 return ci_populate_ulv_level(hwmgr, ulv_level);
994 }
995
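/*
 * Fill the PCIe link DPM levels (gen speed, lane count, up/down thresholds)
 * from the PCIe speed table and record the level count and enable mask.
 */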
996 static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
997 {
998 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
999 struct smu7_dpm_table *dpm_table = &data->dpm_table;
1000 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1001 uint32_t i;
1002
1003 /* Index dpm_table->pcie_speed_table.count is reserved for PCIE boot level.*/
1004 for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
1005 table->LinkLevel[i].PcieGenSpeed =
1006 (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
1007 table->LinkLevel[i].PcieLaneCount =
1008 (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
1009 table->LinkLevel[i].EnabledForActivity = 1;
1010 table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5);
1011 table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30);
1012 }
1013
1014 smu_data->smc_state_table.LinkLevelCount =
1015 (uint8_t)dpm_table->pcie_speed_table.count;
1016 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
1017 phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
1018
1019 return 0;
1020 }
1021
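/*
 * Compute the MPLL register set (feedback/post dividers, DLL control,
 * optional memory spread spectrum) for a target memory clock and store it
 * in the SMC memory-level entry.
 */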
1022 static int ci_calculate_mclk_params(
1023 struct pp_hwmgr *hwmgr,
1024 uint32_t memory_clock,
1025 SMU7_Discrete_MemoryLevel *mclk,
1026 bool strobe_mode,
1027 bool dllStateOn
1028 )
1029 {
1030 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1031 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
1032 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
1033 uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
1034 uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
1035 uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
1036 uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
1037 uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
1038 uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
1039 uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
1040
1041 pp_atomctrl_memory_clock_param mpll_param;
1042 int result;
1043
1044 result = atomctrl_get_memory_pll_dividers_si(hwmgr,
1045 memory_clock, &mpll_param, strobe_mode);
1046 PP_ASSERT_WITH_CODE(0 == result,
1047 "Error retrieving Memory Clock Parameters from VBIOS.", return result);
1048
1049 mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
1050
1051 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1052 MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
1053 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1054 MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
1055 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
1056 MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
1057
1058 mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
1059 MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
1060
1061 if (data->is_memory_gddr5) {
1062 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
1063 MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
1064 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
1065 MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
1066 }
1067
1068 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1069 PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
1070 pp_atomctrl_internal_ss_info ss_info;
1071 uint32_t freq_nom;
1072 uint32_t tmp;
1073 uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
1074
1075 /* for GDDR5 for all modes and DDR3 */
1076 if (1 == mpll_param.qdr)
1077 freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
1078 else
1079 freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
1080
1081 /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
1082 tmp = (freq_nom / reference_clock);
1083 tmp = tmp * tmp;
1084
1085 if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
1086 uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
1087 uint32_t clkv =
1088 (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
1089 ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
1090
1091 mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
1092 mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
1093 }
1094 }
1095
1096 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1097 MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
1098 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1099 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
1100 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1101 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
1102
1103
1104 mclk->MclkFrequency = memory_clock;
1105 mclk->MpllFuncCntl = mpll_func_cntl;
1106 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
1107 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
1108 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
1109 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
1110 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
1111 mclk->DllCntl = dll_cntl;
1112 mclk->MpllSs1 = mpll_ss1;
1113 mclk->MpllSs2 = mpll_ss2;
1114
1115 return 0;
1116 }
1117
1118 static uint8_t ci_get_mclk_frequency_ratio(uint32_t memory_clock,
1119 bool strobe_mode)
1120 {
1121 uint8_t mc_para_index;
1122
1123 if (strobe_mode) {
1124 if (memory_clock < 12500)
1125 mc_para_index = 0x00;
1126 else if (memory_clock > 47500)
1127 mc_para_index = 0x0f;
1128 else
1129 mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
1130 } else {
1131 if (memory_clock < 65000)
1132 mc_para_index = 0x00;
1133 else if (memory_clock > 135000)
1134 mc_para_index = 0x0f;
1135 else
1136 mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
1137 }
1138
1139 return mc_para_index;
1140 }
1141
1142 static uint8_t ci_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
1143 {
1144 uint8_t mc_para_index;
1145
1146 if (memory_clock < 10000)
1147 mc_para_index = 0;
1148 else if (memory_clock >= 80000)
1149 mc_para_index = 0x0f;
1150 else
1151 mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
1152
1153 return mc_para_index;
1154 }
1155
1156 static int ci_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
1157 uint32_t memory_clock, uint32_t *p_shed)
1158 {
1159 unsigned int i;
1160
1161 *p_shed = 1;
1162
1163 for (i = 0; i < pl->count; i++) {
1164 if (memory_clock < pl->entries[i].Mclk) {
1165 *p_shed = i;
1166 break;
1167 }
1168 }
1169
1170 return 0;
1171 }
1172
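/*
 * Fill one SMC memory DPM level: minimum VDDC/VDDCI/MVDD from the dependency
 * tables, strobe/EDC decisions for GDDR5, DLL state, and the MPLL register
 * values, then convert the fields to SMC byte order.
 */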
1173 static int ci_populate_single_memory_level(
1174 struct pp_hwmgr *hwmgr,
1175 uint32_t memory_clock,
1176 SMU7_Discrete_MemoryLevel *memory_level
1177 )
1178 {
1179 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1180 int result = 0;
1181 bool dll_state_on;
1182 uint32_t mclk_edc_wr_enable_threshold = 40000;
1183 uint32_t mclk_edc_enable_threshold = 40000;
1184 uint32_t mclk_strobe_mode_threshold = 40000;
1185
1186 if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
1187 result = ci_get_dependency_volt_by_clk(hwmgr,
1188 hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
1189 PP_ASSERT_WITH_CODE((0 == result),
1190 "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
1191 }
1192
1193 if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
1194 result = ci_get_dependency_volt_by_clk(hwmgr,
1195 hwmgr->dyn_state.vddci_dependency_on_mclk,
1196 memory_clock,
1197 &memory_level->MinVddci);
1198 PP_ASSERT_WITH_CODE((0 == result),
1199 "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
1200 }
1201
1202 if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
1203 result = ci_get_dependency_volt_by_clk(hwmgr,
1204 hwmgr->dyn_state.mvdd_dependency_on_mclk,
1205 memory_clock,
1206 &memory_level->MinMvdd);
1207 PP_ASSERT_WITH_CODE((0 == result),
1208 "can not find MinMvdd voltage value from memory MVDD voltage dependency table", return result);
1209 }
1210
1211 memory_level->MinVddcPhases = 1;
1212
1213 if (data->vddc_phase_shed_control) {
1214 ci_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
1215 memory_clock, &memory_level->MinVddcPhases);
1216 }
1217
1218 memory_level->EnabledForThrottle = 1;
1219 memory_level->EnabledForActivity = 0;
1220 memory_level->UpH = data->current_profile_setting.mclk_up_hyst;
1221 memory_level->DownH = data->current_profile_setting.mclk_down_hyst;
1222 memory_level->VoltageDownH = 0;
1223
1224 /* Indicates maximum activity level for this performance level.*/
1225 memory_level->ActivityLevel = data->current_profile_setting.mclk_activity;
1226 memory_level->StutterEnable = 0;
1227 memory_level->StrobeEnable = 0;
1228 memory_level->EdcReadEnable = 0;
1229 memory_level->EdcWriteEnable = 0;
1230 memory_level->RttEnable = 0;
1231
1232 /* default set to low watermark. Highest level will be set to high later.*/
1233 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1234
1235 data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
1236 data->display_timing.vrefresh = hwmgr->display_config->vrefresh;
1237
1238 /* stutter mode is not supported on CI */
1239
1240 /* decide strobe mode*/
1241 memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
1242 (memory_clock <= mclk_strobe_mode_threshold);
1243
1244 /* decide EDC mode and memory clock ratio*/
1245 if (data->is_memory_gddr5) {
1246 memory_level->StrobeRatio = ci_get_mclk_frequency_ratio(memory_clock,
1247 memory_level->StrobeEnable);
1248
1249 if ((mclk_edc_enable_threshold != 0) &&
1250 (memory_clock > mclk_edc_enable_threshold)) {
1251 memory_level->EdcReadEnable = 1;
1252 }
1253
1254 if ((mclk_edc_wr_enable_threshold != 0) &&
1255 (memory_clock > mclk_edc_wr_enable_threshold)) {
1256 memory_level->EdcWriteEnable = 1;
1257 }
1258
1259 if (memory_level->StrobeEnable) {
1260 if (ci_get_mclk_frequency_ratio(memory_clock, 1) >=
1261 ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
1262 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
1263 else
1264 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
1265 } else
1266 dll_state_on = data->dll_default_on;
1267 } else {
1268 memory_level->StrobeRatio =
1269 ci_get_ddr3_mclk_frequency_ratio(memory_clock);
1270 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
1271 }
1272
1273 result = ci_calculate_mclk_params(hwmgr,
1274 memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
1275
1276 if (0 == result) {
1277 memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
1278 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
1279 memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
1280 memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
1281 /* MCLK frequency in units of 10KHz*/
1282 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
1283 /* Indicates maximum activity level for this performance level.*/
1284 CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
1285 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
1286 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
1287 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
1288 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
1289 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
1290 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
1291 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
1292 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
1293 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
1294 }
1295
1296 return result;
1297 }
1298
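/*
 * Build the memory DPM level array from the MCLK DPM table, apply the
 * Hawaii (0x67B0/0x67B1 revision 0) workaround for the second level's
 * minimum VDDC, and upload the array to SMC SRAM.
 */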
1299 static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1300 {
1301 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1302 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1303 struct smu7_dpm_table *dpm_table = &data->dpm_table;
1304 int result;
1305 struct amdgpu_device *adev = hwmgr->adev;
1306 uint32_t dev_id;
1307
1308 uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
1309 uint32_t level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * SMU7_MAX_LEVELS_MEMORY;
1310 SMU7_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
1311 uint32_t i;
1312
1313 memset(levels, 0x00, level_array_size);
1314
1315 for (i = 0; i < dpm_table->mclk_table.count; i++) {
1316 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
1317 "can not populate memory level as memory clock is zero", return -EINVAL);
1318 result = ci_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
1319 &(smu_data->smc_state_table.MemoryLevel[i]));
1320 if (0 != result)
1321 return result;
1322 }
1323
1324 if (data->mclk_dpm_key_disabled && dpm_table->mclk_table.count) {
1325 /* Populate the table with the highest MCLK level when MCLK DPM is disabled */
1326 for (i = 0; i < dpm_table->mclk_table.count - 1; i++) {
1327 levels[i] = levels[dpm_table->mclk_table.count - 1];
1328 levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
1329 }
1330 }
1331
1332 smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
1333
1334 dev_id = adev->pdev->device;
1335
1336 if ((dpm_table->mclk_table.count >= 2) &&
1337 ((dev_id == 0x67B0) || (dev_id == 0x67B1)) &&
1338 (adev->pdev->revision == 0)) {
1339 smu_data->smc_state_table.MemoryLevel[1].MinVddc =
1340 smu_data->smc_state_table.MemoryLevel[0].MinVddc;
1341 smu_data->smc_state_table.MemoryLevel[1].MinVddcPhases =
1342 smu_data->smc_state_table.MemoryLevel[0].MinVddcPhases;
1343 }
1344 smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
1345 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
1346
1347 smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
1348 data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
1349 smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
1350
1351 result = ci_copy_bytes_to_smc(hwmgr,
1352 level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
1353 SMC_RAM_END);
1354
1355 return result;
1356 }
1357
1358 static int ci_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
1359 SMU7_Discrete_VoltageLevel *voltage)
1360 {
1361 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1362
1363 uint32_t i = 0;
1364
1365 if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
1366 /* find the first MVDD entry whose clock is at or above the requested clock */
1367 for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
1368 if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
1369 /* Always round to higher voltage. */
1370 voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
1371 break;
1372 }
1373 }
1374
1375 PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
1376 "MVDD Voltage is outside the supported range.", return -EINVAL);
1377
1378 } else {
1379 return -EINVAL;
1380 }
1381
1382 return 0;
1383 }
1384
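/*
 * Fill the ACPI (lowest-power) SCLK and MCLK levels: minimum voltages,
 * SPLL powered down and held in reset, memory DLLs reset and disabled,
 * and all activity/throttling features off.
 */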
1385 static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1386 SMU7_Discrete_DpmTable *table)
1387 {
1388 int result = 0;
1389 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1390 struct pp_atomctrl_clock_dividers_vi dividers;
1391
1392 SMU7_Discrete_VoltageLevel voltage_level;
1393 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
1394 uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
1395 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
1396 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
1397
1398
1399 /* The ACPI state should not do DPM on DC (or ever).*/
1400 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
1401
1402 if (data->acpi_vddc)
1403 table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
1404 else
1405 table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
1406
1407 table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 0 : 1;
1408 /* assign zero for now*/
1409 table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
1410
1411 /* get the engine clock dividers for this clock value*/
1412 result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
1413 table->ACPILevel.SclkFrequency, &dividers);
1414
1415 PP_ASSERT_WITH_CODE(result == 0,
1416 "Error retrieving Engine Clock dividers from VBIOS.", return result);
1417
1418 /* divider ID for required SCLK*/
1419 table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
1420 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1421 table->ACPILevel.DeepSleepDivId = 0;
1422
1423 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
1424 CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
1425 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
1426 CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
1427 spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
1428 CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);
1429
1430 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
1431 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
1432 table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
1433 table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
1434 table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
1435 table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
1436 table->ACPILevel.CcPwrDynRm = 0;
1437 table->ACPILevel.CcPwrDynRm1 = 0;
1438
1439 /* For various features to be enabled/disabled while this level is active.*/
1440 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
1441 /* SCLK frequency in units of 10KHz*/
1442 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
1443 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
1444 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
1445 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
1446 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
1447 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
1448 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
1449 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
1450 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
1451
1452
1453 /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
1454 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
1455 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
1456
1457 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
1458 table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
1459 else {
1460 if (data->acpi_vddci != 0)
1461 table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
1462 else
1463 table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
1464 }
1465
1466 if (0 == ci_populate_mvdd_value(hwmgr, 0, &voltage_level))
1467 table->MemoryACPILevel.MinMvdd =
1468 PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
1469 else
1470 table->MemoryACPILevel.MinMvdd = 0;
1471
1472 /* Force reset on DLL*/
1473 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1474 MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
1475 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1476 MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
1477
1478 /* Disable DLL in ACPIState*/
1479 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1480 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
1481 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1482 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
1483
1484 /* Enable DLL bypass signal*/
1485 dll_cntl = PHM_SET_FIELD(dll_cntl,
1486 DLL_CNTL, MRDCK0_BYPASS, 0);
1487 dll_cntl = PHM_SET_FIELD(dll_cntl,
1488 DLL_CNTL, MRDCK1_BYPASS, 0);
1489
1490 table->MemoryACPILevel.DllCntl =
1491 PP_HOST_TO_SMC_UL(dll_cntl);
1492 table->MemoryACPILevel.MclkPwrmgtCntl =
1493 PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
1494 table->MemoryACPILevel.MpllAdFuncCntl =
1495 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
1496 table->MemoryACPILevel.MpllDqFuncCntl =
1497 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
1498 table->MemoryACPILevel.MpllFuncCntl =
1499 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
1500 table->MemoryACPILevel.MpllFuncCntl_1 =
1501 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
1502 table->MemoryACPILevel.MpllFuncCntl_2 =
1503 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
1504 table->MemoryACPILevel.MpllSs1 =
1505 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
1506 table->MemoryACPILevel.MpllSs2 =
1507 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
1508
1509 table->MemoryACPILevel.EnabledForThrottle = 0;
1510 table->MemoryACPILevel.EnabledForActivity = 0;
1511 table->MemoryACPILevel.UpH = 0;
1512 table->MemoryACPILevel.DownH = 100;
1513 table->MemoryACPILevel.VoltageDownH = 0;
1514 /* Indicates maximum activity level for this performance level.*/
1515 table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
1516
1517 table->MemoryACPILevel.StutterEnable = 0;
1518 table->MemoryACPILevel.StrobeEnable = 0;
1519 table->MemoryACPILevel.EdcReadEnable = 0;
1520 table->MemoryACPILevel.EdcWriteEnable = 0;
1521 table->MemoryACPILevel.RttEnable = 0;
1522
1523 return result;
1524 }
1525
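/*
 * Populate the UVD (video decode) clock levels of the SMC DPM table from the
 * VBIOS UVD clock/voltage dependency table. For each level the VCLK and DCLK
 * dividers are looked up via atomctrl and the fields are converted to the
 * SMC's byte order.
 */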
1526 static int ci_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1527 SMU7_Discrete_DpmTable *table)
1528 {
1529 int result = 0;
1530 uint8_t count;
1531 struct pp_atomctrl_clock_dividers_vi dividers;
1532 struct phm_uvd_clock_voltage_dependency_table *uvd_table =
1533 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1534
1535 table->UvdLevelCount = (uint8_t)(uvd_table->count);
1536
1537 for (count = 0; count < table->UvdLevelCount; count++) {
1538 table->UvdLevel[count].VclkFrequency =
1539 uvd_table->entries[count].vclk;
1540 table->UvdLevel[count].DclkFrequency =
1541 uvd_table->entries[count].dclk;
1542 table->UvdLevel[count].MinVddc =
1543 uvd_table->entries[count].v * VOLTAGE_SCALE;
1544 table->UvdLevel[count].MinVddcPhases = 1;
1545
1546 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1547 table->UvdLevel[count].VclkFrequency, &dividers);
1548 PP_ASSERT_WITH_CODE((0 == result),
1549 "can not find divide id for Vclk clock", return result);
1550
1551 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1552
1553 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1554 table->UvdLevel[count].DclkFrequency, &dividers);
1555 PP_ASSERT_WITH_CODE((0 == result),
1556 "can not find divide id for Dclk clock", return result);
1557
1558 table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
1559 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1560 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1561 CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc);
1562 }
1563
1564 return result;
1565 }
1566
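/*
 * Populate the VCE (video encode) ECLK levels of the SMC DPM table from the
 * VBIOS VCE clock/voltage dependency table, looking up the DFS divider for
 * each level and converting the fields to SMC byte order.
 */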
1567 static int ci_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1568 SMU7_Discrete_DpmTable *table)
1569 {
1570 int result = -EINVAL;
1571 uint8_t count;
1572 struct pp_atomctrl_clock_dividers_vi dividers;
1573 struct phm_vce_clock_voltage_dependency_table *vce_table =
1574 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1575
1576 table->VceLevelCount = (uint8_t)(vce_table->count);
1577 table->VceBootLevel = 0;
1578
1579 for (count = 0; count < table->VceLevelCount; count++) {
1580 table->VceLevel[count].Frequency = vce_table->entries[count].evclk;
1581 table->VceLevel[count].MinVoltage =
1582 vce_table->entries[count].v * VOLTAGE_SCALE;
1583 table->VceLevel[count].MinPhases = 1;
1584
1585 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1586 table->VceLevel[count].Frequency, &dividers);
1587 PP_ASSERT_WITH_CODE((0 == result),
1588 "can not find divide id for VCE engine clock",
1589 return result);
1590
1591 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1592
1593 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1594 CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage);
1595 }
1596 return result;
1597 }
1598
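/*
 * Populate the ACP (audio co-processor) clock levels of the SMC DPM table
 * from the VBIOS ACP clock/voltage dependency table, following the same
 * divider lookup and endianness conversion as the UVD/VCE levels above.
 */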
1599 static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
1600 SMU7_Discrete_DpmTable *table)
1601 {
1602 int result = -EINVAL;
1603 uint8_t count;
1604 struct pp_atomctrl_clock_dividers_vi dividers;
1605 struct phm_acp_clock_voltage_dependency_table *acp_table =
1606 hwmgr->dyn_state.acp_clock_voltage_dependency_table;
1607
1608 table->AcpLevelCount = (uint8_t)(acp_table->count);
1609 table->AcpBootLevel = 0;
1610
1611 for (count = 0; count < table->AcpLevelCount; count++) {
1612 table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk;
1613 table->AcpLevel[count].MinVoltage = acp_table->entries[count].v;
1614 table->AcpLevel[count].MinPhases = 1;
1615
1616 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1617 table->AcpLevel[count].Frequency, &dividers);
1618 PP_ASSERT_WITH_CODE((0 == result),
1619 "can not find divide id for engine clock", return result);
1620
1621 table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1622
1623 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
1624 CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage);
1625 }
1626 return result;
1627 }
1628
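/*
 * Ask the VBIOS to program the DRAM timings for the given engine/memory
 * clock pair, then read the resulting MC_ARB_DRAM_TIMING, MC_ARB_DRAM_TIMING2
 * and MC_ARB_BURST_TIME values back and store them in an SMC ARB table entry.
 */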
1629 static int ci_populate_memory_timing_parameters(
1630 struct pp_hwmgr *hwmgr,
1631 uint32_t engine_clock,
1632 uint32_t memory_clock,
1633 struct SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs
1634 )
1635 {
1636 uint32_t dramTiming;
1637 uint32_t dramTiming2;
1638 uint32_t burstTime;
1639 int result;
1640
1641 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1642 engine_clock, memory_clock);
1643
1644 PP_ASSERT_WITH_CODE(result == 0,
1645 "Error calling VBIOS to set DRAM_TIMING.", return result);
1646
1647 dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1648 dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1649 burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1650
1651 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
1652 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
1653 arb_regs->McArbBurstTime = (uint8_t)burstTime;
1654
1655 return 0;
1656 }
1657
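/*
 * Build the full SCLK x MCLK matrix of ARB DRAM timing entries and upload it
 * to SMC RAM at the ARB table offset read from the firmware header.
 */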
1658 static int ci_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1659 {
1660 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1661 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1662 int result = 0;
1663 SMU7_Discrete_MCArbDramTimingTable arb_regs;
1664 uint32_t i, j;
1665
1666 memset(&arb_regs, 0x00, sizeof(SMU7_Discrete_MCArbDramTimingTable));
1667
1668 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1669 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1670 result = ci_populate_memory_timing_parameters
1671 (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
1672 data->dpm_table.mclk_table.dpm_levels[j].value,
1673 &arb_regs.entries[i][j]);
1674
1675 if (0 != result)
1676 break;
1677 }
1678 }
1679
1680 if (0 == result) {
1681 result = ci_copy_bytes_to_smc(
1682 hwmgr,
1683 smu_data->arb_table_start,
1684 (uint8_t *)&arb_regs,
1685 sizeof(SMU7_Discrete_MCArbDramTimingTable),
1686 SMC_RAM_END
1687 );
1688 }
1689
1690 return result;
1691 }
1692
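/*
 * Select the boot SCLK/MCLK DPM levels that match the VBIOS boot-up clocks,
 * falling back to level 0 (with a warning) when no matching dependency-table
 * entry exists, and record the VBIOS boot-up voltages.
 */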
1693 static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1694 SMU7_Discrete_DpmTable *table)
1695 {
1696 int result = 0;
1697 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1698 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1699
1700 table->GraphicsBootLevel = 0;
1701 table->MemoryBootLevel = 0;
1702
1703 /* find boot level from dpm table*/
1704 result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1705 data->vbios_boot_state.sclk_bootup_value,
1706 (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
1707
1708 if (0 != result) {
1709 smu_data->smc_state_table.GraphicsBootLevel = 0;
1710 pr_err("VBIOS did not find boot engine clock value in dependency table. Using Graphics DPM level 0!\n");
1711 result = 0;
1712 }
1713
1714 result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1715 data->vbios_boot_state.mclk_bootup_value,
1716 (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
1717
1718 if (0 != result) {
1719 smu_data->smc_state_table.MemoryBootLevel = 0;
1720 pr_err("VBIOS did not find boot engine clock value in dependency table. Using Memory DPM level 0!\n");
1721 result = 0;
1722 }
1723
1724 table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
1725 table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
1726 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
1727
1728 return result;
1729 }
1730
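/*
 * Copy the addresses of the MC registers flagged as valid into the compact
 * SMC MC register address table, converting each address pair to SMC byte
 * order.
 */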
1731 static int ci_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
1732 SMU7_Discrete_MCRegisters *mc_reg_table)
1733 {
1734 const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smu_backend;
1735
1736 uint32_t i, j;
1737
1738 for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
1739 if (smu_data->mc_reg_table.validflag & 1<<j) {
1740 PP_ASSERT_WITH_CODE(i < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE,
1741 "Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
1742 mc_reg_table->address[i].s0 =
1743 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
1744 mc_reg_table->address[i].s1 =
1745 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
1746 i++;
1747 }
1748 }
1749
1750 mc_reg_table->last = (uint8_t)i;
1751
1752 return 0;
1753 }
1754
1755 static void ci_convert_mc_registers(
1756 const struct ci_mc_reg_entry *entry,
1757 SMU7_Discrete_MCRegisterSet *data,
1758 uint32_t num_entries, uint32_t valid_flag)
1759 {
1760 uint32_t i, j;
1761
1762 for (i = 0, j = 0; j < num_entries; j++) {
1763 if (valid_flag & 1<<j) {
1764 data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
1765 i++;
1766 }
1767 }
1768 }
1769
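/*
 * Pick the first MC register set whose mclk_max covers the requested memory
 * clock (or the last entry if none does) and convert its valid registers to
 * SMC format.
 */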
1770 static int ci_convert_mc_reg_table_entry_to_smc(
1771 struct pp_hwmgr *hwmgr,
1772 const uint32_t memory_clock,
1773 SMU7_Discrete_MCRegisterSet *mc_reg_table_data
1774 )
1775 {
1776 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1777 uint32_t i = 0;
1778
1779 for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
1780 if (memory_clock <=
1781 smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
1782 break;
1783 }
1784 }
1785
1786 if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
1787 --i;
1788
1789 ci_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
1790 mc_reg_table_data, smu_data->mc_reg_table.last,
1791 smu_data->mc_reg_table.validflag);
1792
1793 return 0;
1794 }
1795
1796 static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
1797 SMU7_Discrete_MCRegisters *mc_regs)
1798 {
1799 int result = 0;
1800 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1801 int res;
1802 uint32_t i;
1803
1804 for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
1805 res = ci_convert_mc_reg_table_entry_to_smc(
1806 hwmgr,
1807 data->dpm_table.mclk_table.dpm_levels[i].value,
1808 &mc_regs->data[i]
1809 );
1810
1811 if (0 != res)
1812 result = res;
1813 }
1814
1815 return result;
1816 }
1817
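/*
 * Re-generate the per-MCLK MC register sets and upload them to SMC RAM when
 * an overdrive MCLK change is pending; a no-op otherwise.
 */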
1818 static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
1819 {
1820 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1821 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1822 uint32_t address;
1823 int32_t result;
1824
1825 if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
1826 return 0;
1827
1828
1829 memset(&smu_data->mc_regs, 0, sizeof(SMU7_Discrete_MCRegisters));
1830
1831 result = ci_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
1832
1833 if (result != 0)
1834 return result;
1835
1836 address = smu_data->mc_reg_table_start + (uint32_t)offsetof(SMU7_Discrete_MCRegisters, data[0]);
1837
1838 return ci_copy_bytes_to_smc(hwmgr, address,
1839 (uint8_t *)&smu_data->mc_regs.data[0],
1840 sizeof(SMU7_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
1841 SMC_RAM_END);
1842 }
1843
1844 static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
1845 {
1846 int result;
1847 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1848
1849 memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters));
1850 result = ci_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
1851 PP_ASSERT_WITH_CODE(0 == result,
1852 "Failed to initialize MCRegTable for the MC register addresses!", return result;);
1853
1854 result = ci_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
1855 PP_ASSERT_WITH_CODE(0 == result,
1856 "Failed to initialize MCRegTable for driver state!", return result;);
1857
1858 return ci_copy_bytes_to_smc(hwmgr, smu_data->mc_reg_table_start,
1859 (uint8_t *)&smu_data->mc_regs, sizeof(SMU7_Discrete_MCRegisters), SMC_RAM_END);
1860 }
1861
1862 static int ci_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
1863 {
1864 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1865 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1866 uint8_t count, level;
1867
1868 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
1869
1870 for (level = 0; level < count; level++) {
1871 if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
1872 >= data->vbios_boot_state.sclk_bootup_value) {
1873 smu_data->smc_state_table.GraphicsBootLevel = level;
1874 break;
1875 }
1876 }
1877
1878 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
1879
1880 for (level = 0; level < count; level++) {
1881 if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
1882 >= data->vbios_boot_state.mclk_bootup_value) {
1883 smu_data->smc_state_table.MemoryBootLevel = level;
1884 break;
1885 }
1886 }
1887
1888 return 0;
1889 }
1890
1891 static int ci_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
1892 SMU7_Discrete_DpmTable *table)
1893 {
1894 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1895
1896 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
1897 table->SVI2Enable = 1;
1898 else
1899 table->SVI2Enable = 0;
1900 return 0;
1901 }
1902
1903 static int ci_start_smc(struct pp_hwmgr *hwmgr)
1904 {
1905 /* set SMC instruction start point at 0x0 */
1906 ci_program_jump_on_start(hwmgr);
1907
1908 /* enable smc clock */
1909 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
1910
1911 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
1912
1913 PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
1914 INTERRUPTS_ENABLED, 1);
1915
1916 return 0;
1917 }
1918
1919 static int ci_populate_vr_config(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
1920 {
1921 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1922 uint16_t config;
1923
1924 config = VR_SVI2_PLANE_1;
1925 table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
1926
1927 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1928 config = VR_SVI2_PLANE_2;
1929 table->VRConfig |= config;
1930 } else {
1931 pr_info("VDDCshould be on SVI2 controller!");
1932 }
1933
1934 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1935 config = VR_SVI2_PLANE_2;
1936 table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1937 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1938 config = VR_SMIO_PATTERN_1;
1939 table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1940 }
1941
1942 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1943 config = VR_SMIO_PATTERN_2;
1944 table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
1945 }
1946
1947 return 0;
1948 }
1949
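/*
 * Build the complete SMC DPM table (graphics, memory, link, ACPI, UVD, VCE
 * and ACP levels, boot state, BAPM and fuse data), upload it to SMC RAM and
 * finally start the SMC.
 */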
1950 static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
1951 {
1952 int result;
1953 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1954 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1955 SMU7_Discrete_DpmTable *table = &(smu_data->smc_state_table);
1956 struct pp_atomctrl_gpio_pin_assignment gpio_pin;
1957 u32 i;
1958
1959 ci_initialize_power_tune_defaults(hwmgr);
1960 memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
1961
1962 if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
1963 ci_populate_smc_voltage_tables(hwmgr, table);
1964
1965 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1966 PHM_PlatformCaps_AutomaticDCTransition))
1967 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1968
1969
1970 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1971 PHM_PlatformCaps_StepVddc))
1972 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1973
1974 if (data->is_memory_gddr5)
1975 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1976
1977 if (data->ulv_supported) {
1978 result = ci_populate_ulv_state(hwmgr, &(table->Ulv));
1979 PP_ASSERT_WITH_CODE(0 == result,
1980 "Failed to initialize ULV state!", return result);
1981
1982 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1983 ixCG_ULV_PARAMETER, 0x40035);
1984 }
1985
1986 result = ci_populate_all_graphic_levels(hwmgr);
1987 PP_ASSERT_WITH_CODE(0 == result,
1988 "Failed to initialize Graphics Level!", return result);
1989
1990 result = ci_populate_all_memory_levels(hwmgr);
1991 PP_ASSERT_WITH_CODE(0 == result,
1992 "Failed to initialize Memory Level!", return result);
1993
1994 result = ci_populate_smc_link_level(hwmgr, table);
1995 PP_ASSERT_WITH_CODE(0 == result,
1996 "Failed to initialize Link Level!", return result);
1997
1998 result = ci_populate_smc_acpi_level(hwmgr, table);
1999 PP_ASSERT_WITH_CODE(0 == result,
2000 "Failed to initialize ACPI Level!", return result);
2001
2002 result = ci_populate_smc_vce_level(hwmgr, table);
2003 PP_ASSERT_WITH_CODE(0 == result,
2004 "Failed to initialize VCE Level!", return result);
2005
2006 result = ci_populate_smc_acp_level(hwmgr, table);
2007 PP_ASSERT_WITH_CODE(0 == result,
2008 "Failed to initialize ACP Level!", return result);
2009
2010 /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
2011 /* need to populate the ARB settings for the initial state. */
2012 result = ci_program_memory_timing_parameters(hwmgr);
2013 PP_ASSERT_WITH_CODE(0 == result,
2014 "Failed to Write ARB settings for the initial state.", return result);
2015
2016 result = ci_populate_smc_uvd_level(hwmgr, table);
2017 PP_ASSERT_WITH_CODE(0 == result,
2018 "Failed to initialize UVD Level!", return result);
2019
2020 table->UvdBootLevel = 0;
2021 table->VceBootLevel = 0;
2022 table->AcpBootLevel = 0;
2023 table->SamuBootLevel = 0;
2024
2025 table->GraphicsBootLevel = 0;
2026 table->MemoryBootLevel = 0;
2027
2028 result = ci_populate_smc_boot_level(hwmgr, table);
2029 PP_ASSERT_WITH_CODE(0 == result,
2030 "Failed to initialize Boot Level!", return result);
2031
2032 result = ci_populate_smc_initial_state(hwmgr);
2033 PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
2034
2035 result = ci_populate_bapm_parameters_in_dpm_table(hwmgr);
2036 PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
2037
2038 table->UVDInterval = 1;
2039 table->VCEInterval = 1;
2040 table->ACPInterval = 1;
2041 table->SAMUInterval = 1;
2042 table->GraphicsVoltageChangeEnable = 1;
2043 table->GraphicsThermThrottleEnable = 1;
2044 table->GraphicsInterval = 1;
2045 table->VoltageInterval = 1;
2046 table->ThermalInterval = 1;
2047
2048 table->TemperatureLimitHigh =
2049 (data->thermal_temp_setting.temperature_high *
2050 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2051 table->TemperatureLimitLow =
2052 (data->thermal_temp_setting.temperature_low *
2053 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2054
2055 table->MemoryVoltageChangeEnable = 1;
2056 table->MemoryInterval = 1;
2057 table->VoltageResponseTime = 0;
2058 table->VddcVddciDelta = 4000;
2059 table->PhaseResponseTime = 0;
2060 table->MemoryThermThrottleEnable = 1;
2061
2062 PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
2063 "There must be 1 or more PCIE levels defined in PPTable.",
2064 return -EINVAL);
2065
2066 table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count;
2067 table->PCIeGenInterval = 1;
2068
2069 result = ci_populate_vr_config(hwmgr, table);
2070 PP_ASSERT_WITH_CODE(0 == result,
2071 "Failed to populate VRConfig setting!", return result);
2072 data->vr_config = table->VRConfig;
2073
2074 ci_populate_smc_svi2_config(hwmgr, table);
2075
2076 for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++)
2077 CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]);
2078
2079 table->ThermGpio = 17;
2080 table->SclkStepSize = 0x4000;
2081 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
2082 table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
2083 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2084 PHM_PlatformCaps_RegulatorHot);
2085 } else {
2086 table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
2087 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2088 PHM_PlatformCaps_RegulatorHot);
2089 }
2090
2091 table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
2092
2093 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
2094 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
2095 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
2096 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
2097 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
2098 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
2099 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
2100 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
2101 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
2102 table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta);
2103 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
2104 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
2105
2106 table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
2107 table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
2108 table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
2109
2110 /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
2111 result = ci_copy_bytes_to_smc(hwmgr, smu_data->dpm_table_start +
2112 offsetof(SMU7_Discrete_DpmTable, SystemFlags),
2113 (uint8_t *)&(table->SystemFlags),
2114 sizeof(SMU7_Discrete_DpmTable)-3 * sizeof(SMU7_PIDController),
2115 SMC_RAM_END);
2116
2117 PP_ASSERT_WITH_CODE(0 == result,
2118 "Failed to upload dpm data to SMC memory!", return result;);
2119
2120 result = ci_populate_initial_mc_reg_table(hwmgr);
2121 PP_ASSERT_WITH_CODE((0 == result),
2122 "Failed to populate initialize MC Reg table!", return result);
2123
2124 result = ci_populate_pm_fuses(hwmgr);
2125 PP_ASSERT_WITH_CODE(0 == result,
2126 "Failed to populate PM fuses to SMC memory!", return result);
2127
2128 ci_start_smc(hwmgr);
2129
2130 return 0;
2131 }
2132
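/*
 * Translate the thermal controller's fan-control parameters (PWM and
 * temperature trip points) into the SMC fan table and upload it, provided
 * microcode fan control is enabled, a fan is present and a fan table offset
 * is known.
 */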
2133 static int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
2134 {
2135 struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2136 SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
2137 uint32_t duty100;
2138 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
2139 uint16_t fdo_min, slope1, slope2;
2140 uint32_t reference_clock;
2141 int res;
2142 uint64_t tmp64;
2143
2144 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
2145 return 0;
2146
2147 if (hwmgr->thermal_controller.fanInfo.bNoFan) {
2148 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2149 PHM_PlatformCaps_MicrocodeFanControl);
2150 return 0;
2151 }
2152
2153 if (0 == ci_data->fan_table_start) {
2154 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2155 return 0;
2156 }
2157
2158 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
2159
2160 if (0 == duty100) {
2161 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2162 return 0;
2163 }
2164
2165 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
2166 do_div(tmp64, 10000);
2167 fdo_min = (uint16_t)tmp64;
2168
2169 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
2170 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
2171
2172 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
2173 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
2174
2175 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
2176 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
2177
2178 fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
2179 fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
2180 fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
2181
2182 fan_table.Slope1 = cpu_to_be16(slope1);
2183 fan_table.Slope2 = cpu_to_be16(slope2);
2184
2185 fan_table.FdoMin = cpu_to_be16(fdo_min);
2186
2187 fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
2188
2189 fan_table.HystUp = cpu_to_be16(1);
2190
2191 fan_table.HystSlope = cpu_to_be16(1);
2192
2193 fan_table.TempRespLim = cpu_to_be16(5);
2194
2195 reference_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
2196
2197 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
2198
2199 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
2200
2201 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
2202
2203 res = ci_copy_bytes_to_smc(hwmgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
2204
2205 return res;
2206 }
2207
2208 static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2209 {
2210 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2211
2212 if (data->need_update_smu7_dpm_table &
2213 (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
2214 return ci_program_memory_timing_parameters(hwmgr);
2215
2216 return 0;
2217 }
2218
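/*
 * Push the low-SCLK interrupt threshold to SMC RAM when the
 * SclkThrottleLowNotification cap is set and a threshold is configured,
 * then refresh the MC register table and memory timing parameters.
 */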
2219 static int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2220 {
2221 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2222 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2223
2224 int result = 0;
2225 uint32_t low_sclk_interrupt_threshold = 0;
2226
2227 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2228 PHM_PlatformCaps_SclkThrottleLowNotification)
2229 && (data->low_sclk_interrupt_threshold != 0)) {
2230 low_sclk_interrupt_threshold =
2231 data->low_sclk_interrupt_threshold;
2232
2233 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2234
2235 result = ci_copy_bytes_to_smc(
2236 hwmgr,
2237 smu_data->dpm_table_start +
2238 offsetof(SMU7_Discrete_DpmTable,
2239 LowSclkInterruptT),
2240 (uint8_t *)&low_sclk_interrupt_threshold,
2241 sizeof(uint32_t),
2242 SMC_RAM_END);
2243 }
2244
2245 result = ci_update_and_upload_mc_reg_table(hwmgr);
2246
2247 PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
2248
2249 result = ci_program_mem_timing_parameters(hwmgr);
2250 PP_ASSERT_WITH_CODE((result == 0),
2251 "Failed to program memory timing parameters!",
2252 );
2253
2254 return result;
2255 }
2256
2257 static uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
2258 {
2259 switch (type) {
2260 case SMU_SoftRegisters:
2261 switch (member) {
2262 case HandshakeDisables:
2263 return offsetof(SMU7_SoftRegisters, HandshakeDisables);
2264 case VoltageChangeTimeout:
2265 return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout);
2266 case AverageGraphicsActivity:
2267 return offsetof(SMU7_SoftRegisters, AverageGraphicsA);
2268 case AverageMemoryActivity:
2269 return offsetof(SMU7_SoftRegisters, AverageMemoryA);
2270 case PreVBlankGap:
2271 return offsetof(SMU7_SoftRegisters, PreVBlankGap);
2272 case VBlankTimeout:
2273 return offsetof(SMU7_SoftRegisters, VBlankTimeout);
2274 case DRAM_LOG_ADDR_H:
2275 return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_H);
2276 case DRAM_LOG_ADDR_L:
2277 return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_L);
2278 case DRAM_LOG_PHY_ADDR_H:
2279 return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
2280 case DRAM_LOG_PHY_ADDR_L:
2281 return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
2282 case DRAM_LOG_BUFF_SIZE:
2283 return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE);
2284 }
2285 break;
2286 case SMU_Discrete_DpmTable:
2287 switch (member) {
2288 case LowSclkInterruptThreshold:
2289 return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT);
2290 }
2291 break;
2292 }
2293 pr_debug("can't get the offset of type %x member %x\n", type, member);
2294 return 0;
2295 }
2296
2297 static uint32_t ci_get_mac_definition(uint32_t value)
2298 {
2299 switch (value) {
2300 case SMU_MAX_LEVELS_GRAPHICS:
2301 return SMU7_MAX_LEVELS_GRAPHICS;
2302 case SMU_MAX_LEVELS_MEMORY:
2303 return SMU7_MAX_LEVELS_MEMORY;
2304 case SMU_MAX_LEVELS_LINK:
2305 return SMU7_MAX_LEVELS_LINK;
2306 case SMU_MAX_ENTRIES_SMIO:
2307 return SMU7_MAX_ENTRIES_SMIO;
2308 case SMU_MAX_LEVELS_VDDC:
2309 case SMU_MAX_LEVELS_VDDGFX:
2310 return SMU7_MAX_LEVELS_VDDC;
2311 case SMU_MAX_LEVELS_VDDCI:
2312 return SMU7_MAX_LEVELS_VDDCI;
2313 case SMU_MAX_LEVELS_MVDD:
2314 return SMU7_MAX_LEVELS_MVDD;
2315 }
2316
2317 pr_debug("can't get the mac of %x\n", value);
2318 return 0;
2319 }
2320
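/*
 * Copy the SMU firmware image into SMC RAM through the auto-incrementing
 * indirect register interface; the image must fit within SMC RAM and be a
 * multiple of four bytes.
 */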
2321 static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr)
2322 {
2323 uint32_t byte_count, start_addr;
2324 uint8_t *src;
2325 uint32_t data;
2326
2327 struct cgs_firmware_info info = {0};
2328
2329 cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
2330
2331 hwmgr->is_kicker = info.is_kicker;
2332 hwmgr->smu_version = info.version;
2333 byte_count = info.image_size;
2334 src = (uint8_t *)info.kptr;
2335 start_addr = info.ucode_start_address;
2336
2337 if (byte_count > SMC_RAM_END) {
2338 pr_err("SMC address is beyond the SMC RAM area.\n");
2339 return -EINVAL;
2340 }
2341
2342 cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr);
2343 PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
2344
2345 for (; byte_count >= 4; byte_count -= 4) {
2346 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
2347 cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
2348 src += 4;
2349 }
2350 PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
2351
2352 if (0 != byte_count) {
2353 pr_err("SMC size must be divisible by 4\n");
2354 return -EINVAL;
2355 }
2356
2357 return 0;
2358 }
2359
2360 static int ci_upload_firmware(struct pp_hwmgr *hwmgr)
2361 {
2362 if (ci_is_smc_ram_running(hwmgr)) {
2363 pr_info("smc is running, no need to load smc firmware\n");
2364 return 0;
2365 }
2366 PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
2367 boot_seq_done, 1);
2368 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_MISC_CNTL,
2369 pre_fetcher_en, 1);
2370
2371 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
2372 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
2373 return ci_load_smc_ucode(hwmgr);
2374 }
2375
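/*
 * Upload the SMC firmware (skipped if the SMC is already running), then read
 * the offsets of the DPM table, soft registers, MC register table, fan table
 * and ARB table from the firmware header in SMC RAM.
 */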
2376 static int ci_process_firmware_header(struct pp_hwmgr *hwmgr)
2377 {
2378 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2379 struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2380
2381 uint32_t tmp = 0;
2382 int result;
2383 bool error = false;
2384
2385 if (ci_upload_firmware(hwmgr))
2386 return -EINVAL;
2387
2388 result = ci_read_smc_sram_dword(hwmgr,
2389 SMU7_FIRMWARE_HEADER_LOCATION +
2390 offsetof(SMU7_Firmware_Header, DpmTable),
2391 &tmp, SMC_RAM_END);
2392
2393 if (0 == result)
2394 ci_data->dpm_table_start = tmp;
2395
2396 error |= (0 != result);
2397
2398 result = ci_read_smc_sram_dword(hwmgr,
2399 SMU7_FIRMWARE_HEADER_LOCATION +
2400 offsetof(SMU7_Firmware_Header, SoftRegisters),
2401 &tmp, SMC_RAM_END);
2402
2403 if (0 == result) {
2404 data->soft_regs_start = tmp;
2405 ci_data->soft_regs_start = tmp;
2406 }
2407
2408 error |= (0 != result);
2409
2410 result = ci_read_smc_sram_dword(hwmgr,
2411 SMU7_FIRMWARE_HEADER_LOCATION +
2412 offsetof(SMU7_Firmware_Header, mcRegisterTable),
2413 &tmp, SMC_RAM_END);
2414
2415 if (0 == result)
2416 ci_data->mc_reg_table_start = tmp;
2417
2418 result = ci_read_smc_sram_dword(hwmgr,
2419 SMU7_FIRMWARE_HEADER_LOCATION +
2420 offsetof(SMU7_Firmware_Header, FanTable),
2421 &tmp, SMC_RAM_END);
2422
2423 if (0 == result)
2424 ci_data->fan_table_start = tmp;
2425
2426 error |= (0 != result);
2427
2428 result = ci_read_smc_sram_dword(hwmgr,
2429 SMU7_FIRMWARE_HEADER_LOCATION +
2430 offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
2431 &tmp, SMC_RAM_END);
2432
2433 if (0 == result)
2434 ci_data->arb_table_start = tmp;
2435
2436 error |= (0 != result);
2437
2438 result = ci_read_smc_sram_dword(hwmgr,
2439 SMU7_FIRMWARE_HEADER_LOCATION +
2440 offsetof(SMU7_Firmware_Header, Version),
2441 &tmp, SMC_RAM_END);
2442
2443 if (0 == result)
2444 hwmgr->microcode_version_info.SMC = tmp;
2445
2446 error |= (0 != result);
2447
2448 return error ? 1 : 0;
2449 }
2450
2451 static uint8_t ci_get_memory_modile_index(struct pp_hwmgr *hwmgr)
2452 {
2453 return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
2454 }
2455
2456 static bool ci_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
2457 {
2458 bool result = true;
2459
2460 switch (in_reg) {
2461 case mmMC_SEQ_RAS_TIMING:
2462 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
2463 break;
2464
2465 case mmMC_SEQ_DLL_STBY:
2466 *out_reg = mmMC_SEQ_DLL_STBY_LP;
2467 break;
2468
2469 case mmMC_SEQ_G5PDX_CMD0:
2470 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
2471 break;
2472
2473 case mmMC_SEQ_G5PDX_CMD1:
2474 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
2475 break;
2476
2477 case mmMC_SEQ_G5PDX_CTRL:
2478 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
2479 break;
2480
2481 case mmMC_SEQ_CAS_TIMING:
2482 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
2483 break;
2484
2485 case mmMC_SEQ_MISC_TIMING:
2486 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
2487 break;
2488
2489 case mmMC_SEQ_MISC_TIMING2:
2490 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
2491 break;
2492
2493 case mmMC_SEQ_PMG_DVS_CMD:
2494 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
2495 break;
2496
2497 case mmMC_SEQ_PMG_DVS_CTL:
2498 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
2499 break;
2500
2501 case mmMC_SEQ_RD_CTL_D0:
2502 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
2503 break;
2504
2505 case mmMC_SEQ_RD_CTL_D1:
2506 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
2507 break;
2508
2509 case mmMC_SEQ_WR_CTL_D0:
2510 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
2511 break;
2512
2513 case mmMC_SEQ_WR_CTL_D1:
2514 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
2515 break;
2516
2517 case mmMC_PMG_CMD_EMRS:
2518 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
2519 break;
2520
2521 case mmMC_PMG_CMD_MRS:
2522 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
2523 break;
2524
2525 case mmMC_PMG_CMD_MRS1:
2526 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
2527 break;
2528
2529 case mmMC_SEQ_PMG_TIMING:
2530 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
2531 break;
2532
2533 case mmMC_PMG_CMD_MRS2:
2534 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
2535 break;
2536
2537 case mmMC_SEQ_WR_CTL_2:
2538 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
2539 break;
2540
2541 default:
2542 result = false;
2543 break;
2544 }
2545
2546 return result;
2547 }
2548
2549 static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
2550 {
2551 uint32_t i;
2552 uint16_t address;
2553
2554 for (i = 0; i < table->last; i++) {
2555 table->mc_reg_address[i].s0 =
2556 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
2557 ? address : table->mc_reg_address[i].s1;
2558 }
2559 return 0;
2560 }
2561
2562 static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
2563 struct ci_mc_reg_table *ni_table)
2564 {
2565 uint8_t i, j;
2566
2567 PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2568 "Invalid VramInfo table.", return -EINVAL);
2569 PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
2570 "Invalid VramInfo table.", return -EINVAL);
2571
2572 for (i = 0; i < table->last; i++)
2573 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
2574
2575 ni_table->last = table->last;
2576
2577 for (i = 0; i < table->num_entries; i++) {
2578 ni_table->mc_reg_table_entry[i].mclk_max =
2579 table->mc_reg_table_entry[i].mclk_max;
2580 for (j = 0; j < table->last; j++) {
2581 ni_table->mc_reg_table_entry[i].mc_data[j] =
2582 table->mc_reg_table_entry[i].mc_data[j];
2583 }
2584 }
2585
2586 ni_table->num_entries = table->num_entries;
2587
2588 return 0;
2589 }
2590
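/*
 * Append derived entries for the EMRS/MRS/MRS1 command registers (plus
 * MC_PMG_AUTO_CMD on non-GDDR5 boards); their values are built from the
 * current MC_PMG_CMD_* register contents combined with the per-entry
 * MC_SEQ_MISC1 / MC_SEQ_RESERVE_M data.
 */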
2591 static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr,
2592 struct ci_mc_reg_table *table)
2593 {
2594 uint8_t i, j, k;
2595 uint32_t temp_reg;
2596 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2597
2598 for (i = 0, j = table->last; i < table->last; i++) {
2599 PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2600 "Invalid VramInfo table.", return -EINVAL);
2601
2602 switch (table->mc_reg_address[i].s1) {
2603
2604 case mmMC_SEQ_MISC1:
2605 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
2606 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
2607 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
2608 for (k = 0; k < table->num_entries; k++) {
2609 table->mc_reg_table_entry[k].mc_data[j] =
2610 ((temp_reg & 0xffff0000)) |
2611 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
2612 }
2613 j++;
2614
2615 PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2616 "Invalid VramInfo table.", return -EINVAL);
2617 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
2618 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
2619 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
2620 for (k = 0; k < table->num_entries; k++) {
2621 table->mc_reg_table_entry[k].mc_data[j] =
2622 (temp_reg & 0xffff0000) |
2623 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2624
2625 if (!data->is_memory_gddr5)
2626 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
2627 }
2628 j++;
2629
2630 if (!data->is_memory_gddr5) {
2631 PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2632 "Invalid VramInfo table.", return -EINVAL);
2633 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
2634 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
2635 for (k = 0; k < table->num_entries; k++) {
2636 table->mc_reg_table_entry[k].mc_data[j] =
2637 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
2638 }
2639 j++;
2640 }
2641
2642 break;
2643
2644 case mmMC_SEQ_RESERVE_M:
2645 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
2646 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
2647 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
2648 for (k = 0; k < table->num_entries; k++) {
2649 table->mc_reg_table_entry[k].mc_data[j] =
2650 (temp_reg & 0xffff0000) |
2651 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2652 }
2653 j++;
2654 break;
2655
2656 default:
2657 break;
2658 }
2659
2660 }
2661
2662 table->last = j;
2663
2664 return 0;
2665 }
2666
2667 static int ci_set_valid_flag(struct ci_mc_reg_table *table)
2668 {
2669 uint8_t i, j;
2670
2671 for (i = 0; i < table->last; i++) {
2672 for (j = 1; j < table->num_entries; j++) {
2673 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
2674 table->mc_reg_table_entry[j].mc_data[i]) {
2675 table->validflag |= (1 << i);
2676 break;
2677 }
2678 }
2679 }
2680
2681 return 0;
2682 }
2683
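/*
 * Mirror the MC sequencer registers into their _LP shadows, read the VBIOS
 * MC register table for the installed memory module and convert it into the
 * driver's ci_mc_reg_table representation, marking which registers actually
 * vary across entries.
 */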
2684 static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
2685 {
2686 int result;
2687 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2688 pp_atomctrl_mc_reg_table *table;
2689 struct ci_mc_reg_table *ni_table = &smu_data->mc_reg_table;
2690 uint8_t module_index = ci_get_memory_modile_index(hwmgr);
2691
2692 table = kzalloc(sizeof(*table), GFP_KERNEL);
2693
2694 if (NULL == table)
2695 return -ENOMEM;
2696
2697 /* Program additional LP registers that are no longer programmed by VBIOS */
2698 cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
2699 cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
2700 cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
2701 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
2702 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
2703 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
2704 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
2705 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
2706 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
2707 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
2708 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
2709 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
2710 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
2711 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
2712 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
2713 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
2714 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
2715 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
2716 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
2717 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
2718
2719 result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
2720
2721 if (0 == result)
2722 result = ci_copy_vbios_smc_reg_table(table, ni_table);
2723
2724 if (0 == result) {
2725 ci_set_s0_mc_reg_index(ni_table);
2726 result = ci_set_mc_special_registers(hwmgr, ni_table);
2727 }
2728
2729 if (0 == result)
2730 ci_set_valid_flag(ni_table);
2731
2732 kfree(table);
2733
2734 return result;
2735 }
2736
2737 static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
2738 {
2739 return ci_is_smc_ram_running(hwmgr);
2740 }
2741
2742 static int ci_smu_init(struct pp_hwmgr *hwmgr)
2743 {
2744 struct ci_smumgr *ci_priv;
2745
2746 ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL);
2747
2748 if (ci_priv == NULL)
2749 return -ENOMEM;
2750
2751 hwmgr->smu_backend = ci_priv;
2752
2753 return 0;
2754 }
2755
2756 static int ci_smu_fini(struct pp_hwmgr *hwmgr)
2757 {
2758 kfree(hwmgr->smu_backend);
2759 hwmgr->smu_backend = NULL;
2760 return 0;
2761 }
2762
2763 static int ci_start_smu(struct pp_hwmgr *hwmgr)
2764 {
2765 return 0;
2766 }
2767
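/*
 * Apply a power-profile change: optionally freeze SCLK/MCLK DPM, patch the
 * activity level and up/down hysteresis of each graphics and memory level
 * directly in SMC RAM, then unfreeze DPM again.
 */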
2768 static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
2769 void *profile_setting)
2770 {
2771 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2772 struct ci_smumgr *smu_data = (struct ci_smumgr *)
2773 (hwmgr->smu_backend);
2774 struct profile_mode_setting *setting;
2775 struct SMU7_Discrete_GraphicsLevel *levels =
2776 smu_data->smc_state_table.GraphicsLevel;
2777 uint32_t array = smu_data->dpm_table_start +
2778 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2779
2780 uint32_t mclk_array = smu_data->dpm_table_start +
2781 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
2782 struct SMU7_Discrete_MemoryLevel *mclk_levels =
2783 smu_data->smc_state_table.MemoryLevel;
2784 uint32_t i;
2785 uint32_t offset, up_hyst_offset, down_hyst_offset, clk_activity_offset, tmp;
2786
2787 if (profile_setting == NULL)
2788 return -EINVAL;
2789
2790 setting = (struct profile_mode_setting *)profile_setting;
2791
2792 if (setting->bupdate_sclk) {
2793 if (!data->sclk_dpm_key_disabled)
2794 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
2795 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
2796 if (levels[i].ActivityLevel !=
2797 cpu_to_be16(setting->sclk_activity)) {
2798 levels[i].ActivityLevel = cpu_to_be16(setting->sclk_activity);
2799
2800 clk_activity_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
2801 + offsetof(SMU7_Discrete_GraphicsLevel, ActivityLevel);
2802 offset = clk_activity_offset & ~0x3;
2803 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2804 tmp = phm_set_field_to_u32(clk_activity_offset, tmp, levels[i].ActivityLevel, sizeof(uint16_t));
2805 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2806
2807 }
2808 if (levels[i].UpH != setting->sclk_up_hyst ||
2809 levels[i].DownH != setting->sclk_down_hyst) {
2810 levels[i].UpH = setting->sclk_up_hyst;
2811 levels[i].DownH = setting->sclk_down_hyst;
2812 up_hyst_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
2813 + offsetof(SMU7_Discrete_GraphicsLevel, UpH);
2814 down_hyst_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
2815 + offsetof(SMU7_Discrete_GraphicsLevel, DownH);
2816 offset = up_hyst_offset & ~0x3;
2817 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2818 tmp = phm_set_field_to_u32(up_hyst_offset, tmp, levels[i].UpH, sizeof(uint8_t));
2819 tmp = phm_set_field_to_u32(down_hyst_offset, tmp, levels[i].DownH, sizeof(uint8_t));
2820 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2821 }
2822 }
2823 if (!data->sclk_dpm_key_disabled)
2824 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
2825 }
2826
2827 if (setting->bupdate_mclk) {
2828 if (!data->mclk_dpm_key_disabled)
2829 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
2830 for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
2831 if (mclk_levels[i].ActivityLevel !=
2832 cpu_to_be16(setting->mclk_activity)) {
2833 mclk_levels[i].ActivityLevel = cpu_to_be16(setting->mclk_activity);
2834
2835 clk_activity_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
2836 + offsetof(SMU7_Discrete_MemoryLevel, ActivityLevel);
2837 offset = clk_activity_offset & ~0x3;
2838 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2839 tmp = phm_set_field_to_u32(clk_activity_offset, tmp, mclk_levels[i].ActivityLevel, sizeof(uint16_t));
2840 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2841
2842 }
2843 if (mclk_levels[i].UpH != setting->mclk_up_hyst ||
2844 mclk_levels[i].DownH != setting->mclk_down_hyst) {
2845 mclk_levels[i].UpH = setting->mclk_up_hyst;
2846 mclk_levels[i].DownH = setting->mclk_down_hyst;
2847 up_hyst_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
2848 + offsetof(SMU7_Discrete_MemoryLevel, UpH);
2849 down_hyst_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
2850 + offsetof(SMU7_Discrete_MemoryLevel, DownH);
2851 offset = up_hyst_offset & ~0x3;
2852 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2853 tmp = phm_set_field_to_u32(up_hyst_offset, tmp, mclk_levels[i].UpH, sizeof(uint8_t));
2854 tmp = phm_set_field_to_u32(down_hyst_offset, tmp, mclk_levels[i].DownH, sizeof(uint8_t));
2855 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2856 }
2857 }
2858 if (!data->mclk_dpm_key_disabled)
2859 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
2860 }
2861 return 0;
2862 }
2863
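/*
 * Choose the UVD boot level, build the mask of UVD DPM levels whose voltage
 * fits under the current AC/DC limit, and hand that mask to the SMC.
 */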
2864 static int ci_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
2865 {
2866 struct amdgpu_device *adev = hwmgr->adev;
2867 struct smu7_hwmgr *data = hwmgr->backend;
2868 struct ci_smumgr *smu_data = hwmgr->smu_backend;
2869 struct phm_uvd_clock_voltage_dependency_table *uvd_table =
2870 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
2871 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
2872 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
2873 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
2874 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
2875 uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
2876 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
2877 int32_t i;
2878
2879 if (PP_CAP(PHM_PlatformCaps_UVDDPM) || uvd_table->count <= 0)
2880 smu_data->smc_state_table.UvdBootLevel = 0;
2881 else
2882 smu_data->smc_state_table.UvdBootLevel = uvd_table->count - 1;
2883
2884 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
2885 UvdBootLevel, smu_data->smc_state_table.UvdBootLevel);
2886
2887 data->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
2888
2889 for (i = uvd_table->count - 1; i >= 0; i--) {
2890 if (uvd_table->entries[i].v <= max_vddc)
2891 data->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
2892 if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_UVDDPM))
2893 break;
2894 }
2895 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
2896 data->dpm_level_enable_mask.uvd_dpm_enable_mask,
2897 NULL);
2898
2899 return 0;
2900 }
2901
2902 static int ci_update_vce_smc_table(struct pp_hwmgr *hwmgr)
2903 {
2904 struct amdgpu_device *adev = hwmgr->adev;
2905 struct smu7_hwmgr *data = hwmgr->backend;
2906 struct phm_vce_clock_voltage_dependency_table *vce_table =
2907 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
2908 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
2909 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
2910 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
2911 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
2912 uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
2913 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
2914 int32_t i;
2915
2916 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
2917 VceBootLevel, 0); /* temp hard code to level 0, vce can set min evclk*/
2918
2919 data->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
2920
2921 for (i = vce_table->count - 1; i >= 0; i--) {
2922 if (vce_table->entries[i].v <= max_vddc)
2923 data->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
2924 if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_VCEDPM))
2925 break;
2926 }
2927 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
2928 data->dpm_level_enable_mask.vce_dpm_enable_mask,
2929 NULL);
2930
2931 return 0;
2932 }
2933
2934 static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
2935 {
2936 switch (type) {
2937 case SMU_UVD_TABLE:
2938 ci_update_uvd_smc_table(hwmgr);
2939 break;
2940 case SMU_VCE_TABLE:
2941 ci_update_vce_smc_table(hwmgr);
2942 break;
2943 default:
2944 break;
2945 }
2946 return 0;
2947 }
2948
2949 static void ci_reset_smc(struct pp_hwmgr *hwmgr)
2950 {
2951 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
2952 SMC_SYSCON_RESET_CNTL,
2953 rst_reg, 1);
2954 }
2955
2956
2957 static void ci_stop_smc_clock(struct pp_hwmgr *hwmgr)
2958 {
2959 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
2960 SMC_SYSCON_CLOCK_CNTL_0,
2961 ck_disable, 1);
2962 }
2963
2964 static int ci_stop_smc(struct pp_hwmgr *hwmgr)
2965 {
2966 ci_reset_smc(hwmgr);
2967 ci_stop_smc_clock(hwmgr);
2968
2969 return 0;
2970 }
2971
2972 const struct pp_smumgr_func ci_smu_funcs = {
2973 .name = "ci_smu",
2974 .smu_init = ci_smu_init,
2975 .smu_fini = ci_smu_fini,
2976 .start_smu = ci_start_smu,
2977 .check_fw_load_finish = NULL,
2978 .request_smu_load_fw = NULL,
2979 .request_smu_load_specific_fw = NULL,
2980 .send_msg_to_smc = ci_send_msg_to_smc,
2981 .send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter,
2982 .get_argument = smu7_get_argument,
2983 .download_pptable_settings = NULL,
2984 .upload_pptable_settings = NULL,
2985 .get_offsetof = ci_get_offsetof,
2986 .process_firmware_header = ci_process_firmware_header,
2987 .init_smc_table = ci_init_smc_table,
2988 .update_sclk_threshold = ci_update_sclk_threshold,
2989 .thermal_setup_fan_table = ci_thermal_setup_fan_table,
2990 .populate_all_graphic_levels = ci_populate_all_graphic_levels,
2991 .populate_all_memory_levels = ci_populate_all_memory_levels,
2992 .get_mac_definition = ci_get_mac_definition,
2993 .initialize_mc_reg_table = ci_initialize_mc_reg_table,
2994 .is_dpm_running = ci_is_dpm_running,
2995 .update_dpm_settings = ci_update_dpm_settings,
2996 .update_smc_table = ci_update_smc_table,
2997 .stop_smc = ci_stop_smc,
2998 };
2999