xref: /linux/drivers/gpu/drm/radeon/ci_dpm.c (revision 0b8061c340b643e01da431dd60c75a41bb1d31ec)
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/seq_file.h>

#include "atom.h"
#include "ci_dpm.h"
#include "cik.h"
#include "cikd.h"
#include "r600_dpm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_ucode.h"
#include "si_dpm.h"

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define SMC_RAM_END 0x40000

#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1    625
#define VOLTAGE_VID_OFFSET_SCALE2    100

static const struct ci_pt_defaults defaults_hawaii_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
				       u32 arb_freq_src, u32 arb_freq_dest);
static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp);
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);

static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter);

static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev);
static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev);

static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
{
	struct ci_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
{
	struct ci_ps *ps = rps->ps_priv;

	return ps;
}

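/*
 * Select the PowerTune defaults for this ASIC by PCI device ID.  The
 * IDs below cover the Bonaire, Saturn (mobile Bonaire) and Hawaii
 * XT/PRO variants; anything unrecognized falls back to the Bonaire XT
 * defaults.
 */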
static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	switch (rdev->pdev->device) {
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x67B8:
	case 0x67B0:
		pi->powertune_defaults = &defaults_hawaii_xt;
		break;
	case 0x67BA:
	case 0x67B1:
		pi->powertune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	}

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		if (rdev->family == CHIP_HAWAII)
			pi->enable_bapm_feature = false;
		else
			pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}

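/*
 * Convert a VDDC value (in mV) to an SVI2 VID code.  SVI2 encodes
 * voltage as 1.55V - VID * 6.25mV; in 0.25mV units (VOLTAGE_SCALE)
 * 1.55V is 6200 and one VID step is 25, hence the formula below.
 */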
static u8 ci_convert_to_vid(u16 vddc)
{
	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

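/*
 * Fill the BAPM leakage VID tables from the CAC leakage table.  On
 * EVV platforms each entry carries three measured voltages; otherwise
 * the vddc/leakage pair is used.  The SMC table holds at most 8
 * entries, and the leakage table must match the vddc-on-sclk
 * dependency table entry for entry.
 */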
static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}

static int ci_populate_vddc_vid(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *vid = pi->smc_powertune_table.VddCVid;
	u32 i;

	if (pi->vddc_voltage_table.count > 8)
		return -EINVAL;

	for (i = 0; i < pi->vddc_voltage_table.count; i++)
		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_populate_svi_load_line(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

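/*
 * Program the package TDC limit.  The SMC expects power/current
 * limits in 8.8 fixed point, hence the multiply by 256 here and in
 * the other powertune setup routines below.
 */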
static int ci_populate_tdc_limit(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}

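/*
 * The SRAM read below appears to serve only to validate that the
 * PmFuses table is reachable; on success TdcWaterfallCtl is then
 * programmed with the per-ASIC default rather than the fused value.
 */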
static int ci_populate_dw8(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
				     pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_populate_fuzzy_fan(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
	    (rdev->pm.dpm.fan.fan_output_sensitivity == 0))
		rdev->pm.dpm.fan.fan_output_sensitivity =
			rdev->pm.dpm.fan.default_fan_output_sensitivity;

	pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
		cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity);

	return 0;
}

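/*
 * Derive the GnbLPML min/max VIDs by scanning the hi/lo sidd VID
 * arrays populated above; zero entries are unused slots and are
 * skipped.
 */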
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
	u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;

	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

	return 0;
}

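/*
 * Fill the BAPM/DTE section of the SMC DPM table: TDP targets (8.8
 * fixed point watts), thermal trip points (degrees C), platform
 * power management (PPM) limits if provided, and the per-ASIC BAPM
 * temperature gradient matrices.
 */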
static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

static int ci_populate_pm_base(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = ci_read_smc_sram_dword(rdev,
					     SMU7_FIRMWARE_HEADER_LOCATION +
					     offsetof(SMU7_Firmware_Header, PmFuseTable),
					     &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(rdev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(rdev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(rdev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(rdev);
		if (ret)
			return ret;
		ret = ci_populate_fuzzy_fan(rdev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
					   (u8 *)&pi->smc_powertune_table,
					   sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

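/*
 * Toggle the DIDT enable bit for each block (SQ/DB/TD/TCP) that has
 * ramping enabled.  Callers must bracket this with the RLC safe mode
 * enter/exit helpers, as ci_enable_didt() does below.
 */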
static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}

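/*
 * Walk a 0xFFFFFFFF-terminated table of read-modify-write register
 * programming entries.  CACHE-type entries only accumulate field
 * values; the accumulated cache is applied together with the next
 * non-cache entry and then cleared.
 */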
static int ci_program_pt_config_registers(struct radeon_device *rdev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}

static int ci_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = ci_program_pt_config_registers(rdev, didt_config_ci);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		ci_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}

static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct radeon_cac_tdp_table *cac_tdp_table =
						rdev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(rdev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}

static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
			if (smc_result != PPSMC_Result_OK) {
				ret = -EINVAL;
				pi->cac_enabled = false;
			} else {
				pi->cac_enabled = true;
			}
		} else if (pi->cac_enabled) {
			ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev,
					    bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result = PPSMC_Result_OK;

	if (pi->thermal_sclk_dpm_enabled) {
		if (enable)
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_ENABLE_THERMAL_DPM);
		else
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DISABLE_THERMAL_DPM);
	}

	if (smc_result == PPSMC_Result_OK)
		return 0;
	else
		return -EINVAL;
}

static int ci_power_control_set_level(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment) {
		adjust_percent = adjust_polarity ?
			rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;

		ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
	}

	return ret;
}

void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	ci_update_uvd_dpm(rdev, gate);
}

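/*
 * MCLK switching has to complete within the vblank interval or the
 * displays will glitch.  The thresholds here (450us for GDDR5, 300us
 * otherwise) appear to be empirical worst-case switch times for
 * these ASICs.
 */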
bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;

	/* disable mclk switching if the refresh is >120Hz, even if the
	 * blanking period would allow it
	 */
	if (r600_dpm_get_vrefresh(rdev) > 120)
		return true;

	if (vblank_time < switch_limit)
		return true;
	else
		return false;
}

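/*
 * Clamp the requested power state to what the platform can deliver:
 * apply the AC/DC clock and voltage limits, raise clocks to the VCE
 * minimums when VCE is active, and pin mclk to the highest level
 * when memory clock switching is unsafe (multiple CRTCs or a short
 * vblank).
 */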
static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	if (rps->vce_active) {
		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
		rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(rdev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (rdev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	if (rps->vce_active) {
		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
		if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
			mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}

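/*
 * Program the thermal interrupt thresholds.  Temperatures are kept
 * in millidegrees C internally and clamped to the controller's
 * 0-255 degrees C range before being written.
 */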
static int ci_thermal_set_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT);
	tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
	tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
		CI_DIG_THERM_INTL(low_temp / 1000);
	WREG32_SMC(CG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(CG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

static int ci_thermal_enable_alert(struct radeon_device *rdev,
				   bool enable)
{
	u32 thermal_int = RREG32_SMC(CG_THERMAL_INT);
	PPSMC_Result result;

	if (enable) {
		thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
		WREG32_SMC(CG_THERMAL_INT, thermal_int);
		rdev->irq.dpm_thermal = false;
		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
			return -EINVAL;
		}
	} else {
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
		WREG32_SMC(CG_THERMAL_INT, thermal_int);
		rdev->irq.dpm_thermal = true;
		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (pi->fan_ctrl_is_in_default_mode) {
		tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
		pi->fan_ctrl_default_mode = tmp;
		tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
		pi->t_min = tmp;
		pi->fan_ctrl_is_in_default_mode = false;
	}

	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
	tmp |= TMIN(0);
	WREG32_SMC(CG_FDO_CTRL2, tmp);

	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
	tmp |= FDO_PWM_MODE(mode);
	WREG32_SMC(CG_FDO_CTRL2, tmp);
}

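/*
 * Build and upload the SMC fan table.  The vbios fan trip points are
 * in hundredths of a degree C (hence the +50 rounding and /100
 * below), and the PWM slopes are derived from the trip-point deltas
 * in the fixed-point form the fan microcode expects.
 */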
static int ci_thermal_setup_fan_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	int ret;
	u64 tmp64;

	if (!pi->fan_table_start) {
		rdev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;

	if (duty100 == 0) {
		rdev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
	t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;

	pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
	pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;

	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
	fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
	fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = radeon_get_xclk(rdev);

	fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
					       reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((u16)duty100);

	tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
	fan_table.TempSrc = (uint8_t)tmp;

	ret = ci_copy_bytes_to_smc(rdev,
				   pi->fan_table_start,
				   (u8 *)(&fan_table),
				   sizeof(fan_table),
				   pi->sram_end);

	if (ret) {
		DRM_ERROR("Failed to load fan table to the SMC.\n");
		rdev->pm.dpm.fan.ucode_fan_control = false;
	}

	return 0;
}

static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result ret;

	if (pi->caps_od_fuzzy_fan_control_support) {
		ret = ci_send_msg_to_smc_with_parameter(rdev,
							PPSMC_StartFanControl,
							FAN_CONTROL_FUZZY);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
		ret = ci_send_msg_to_smc_with_parameter(rdev,
							PPSMC_MSG_SetFanPwmMax,
							rdev->pm.dpm.fan.default_max_fan_pwm);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	} else {
		ret = ci_send_msg_to_smc_with_parameter(rdev,
							PPSMC_StartFanControl,
							FAN_CONTROL_TABLE);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->fan_is_controlled_by_smc = true;
	return 0;
}

static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
{
	PPSMC_Result ret;
	struct ci_power_info *pi = ci_get_pi(rdev);

	ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl);
	if (ret == PPSMC_Result_OK) {
		pi->fan_is_controlled_by_smc = false;
		return 0;
	} else
		return -EINVAL;
}

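/*
 * Fan speed as a percentage is the current PWM duty cycle scaled
 * against the 100% duty value from CG_FDO_CTRL1.
 */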
int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
				      u32 *speed)
{
	u32 duty, duty100;
	u64 tmp64;

	if (rdev->pm.no_fan)
		return -ENOENT;

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
	duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)duty * 100;
	do_div(tmp64, duty100);
	*speed = (u32)tmp64;

	if (*speed > 100)
		*speed = 100;

	return 0;
}

int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
				      u32 speed)
{
	u32 tmp;
	u32 duty, duty100;
	u64 tmp64;
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (pi->fan_is_controlled_by_smc)
		return -EINVAL;

	if (speed > 100)
		return -EINVAL;

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)speed * duty100;
	do_div(tmp64, 100);
	duty = (u32)tmp64;

	tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
	tmp |= FDO_STATIC_DUTY(duty);
	WREG32_SMC(CG_FDO_CTRL0, tmp);

	return 0;
}

void ci_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode)
{
	if (mode) {
		/* stop auto-manage */
		if (rdev->pm.dpm.fan.ucode_fan_control)
			ci_fan_ctrl_stop_smc_fan_control(rdev);
		ci_fan_ctrl_set_static_mode(rdev, mode);
	} else {
		/* restart auto-manage */
		if (rdev->pm.dpm.fan.ucode_fan_control)
			ci_thermal_start_smc_fan_control(rdev);
		else
			ci_fan_ctrl_set_default_mode(rdev);
	}
}

u32 ci_fan_ctrl_get_mode(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (pi->fan_is_controlled_by_smc)
		return 0;

	tmp = RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
	return (tmp >> FDO_PWM_MODE_SHIFT);
}

#if 0
static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
					 u32 *speed)
{
	u32 tach_period;
	u32 xclk = radeon_get_xclk(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (rdev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
	if (tach_period == 0)
		return -ENOENT;

	*speed = 60 * xclk * 10000 / tach_period;

	return 0;
}

static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
					 u32 speed)
{
	u32 tach_period, tmp;
	u32 xclk = radeon_get_xclk(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (rdev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	if ((speed < rdev->pm.fan_min_rpm) ||
	    (speed > rdev->pm.fan_max_rpm))
		return -EINVAL;

	if (rdev->pm.dpm.fan.ucode_fan_control)
		ci_fan_ctrl_stop_smc_fan_control(rdev);

	tach_period = 60 * xclk * 10000 / (8 * speed);
	tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
	tmp |= TARGET_PERIOD(tach_period);
	WREG32_SMC(CG_TACH_CTRL, tmp);

	ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);

	return 0;
}
#endif

static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (!pi->fan_ctrl_is_in_default_mode) {
		tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
		tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
		WREG32_SMC(CG_FDO_CTRL2, tmp);

		tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
		tmp |= TMIN(pi->t_min);
		WREG32_SMC(CG_FDO_CTRL2, tmp);
		pi->fan_ctrl_is_in_default_mode = true;
	}
}

static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev)
{
	if (rdev->pm.dpm.fan.ucode_fan_control) {
		ci_fan_ctrl_start_smc_fan_control(rdev);
		ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
	}
}

static void ci_thermal_initialize(struct radeon_device *rdev)
{
	u32 tmp;

	if (rdev->pm.fan_pulses_per_revolution) {
		tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
		tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
		WREG32_SMC(CG_TACH_CTRL, tmp);
	}

	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
	tmp |= TACH_PWM_RESP_RATE(0x28);
	WREG32_SMC(CG_FDO_CTRL2, tmp);
}

static int ci_thermal_start_thermal_controller(struct radeon_device *rdev)
{
	int ret;

	ci_thermal_initialize(rdev);
	ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = ci_thermal_enable_alert(rdev, true);
	if (ret)
		return ret;
	if (rdev->pm.dpm.fan.ucode_fan_control) {
		ret = ci_thermal_setup_fan_table(rdev);
		if (ret)
			return ret;
		ci_thermal_start_smc_fan_control(rdev);
	}

	return 0;
}

static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev)
{
	if (!rdev->pm.no_fan)
		ci_fan_ctrl_set_default_mode(rdev);
}

#if 0
static int ci_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_read_smc_sram_dword(rdev,
				      pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif

static int ci_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_write_smc_sram_dword(rdev,
				       pi->soft_regs_start + reg_offset,
				       value, pi->sram_end);
}

static void ci_init_fps_limits(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		table->FpsHighT = cpu_to_be16(tmp);

		tmp = 30;
		table->FpsLowT = cpu_to_be16(tmp);
	}
}

static int ci_update_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;
	u32 low_sclk_interrupt_t = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = ci_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);

	}

	return ret;
}

static void ci_get_leakage_voltages(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
				continue;
			if (vddc != 0 && vddc != virtual_voltage_id) {
				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
				pi->vddc_leakage.count++;
			}
		}
	} else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
										 virtual_voltage_id,
										 leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}

static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	bool want_thermal_protection;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		break;
	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		break;
	}

	if (want_thermal_protection) {
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~THERMAL_PROTECTION_DIS;
		else
			tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}

static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
					   enum radeon_dpm_auto_throttle_src source,
					   bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (enable) {
		if (!(pi->active_auto_throttle_sources & (1 << source))) {
			pi->active_auto_throttle_sources |= 1 << source;
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	} else {
		if (pi->active_auto_throttle_sources & (1 << source)) {
			pi->active_auto_throttle_sources &= ~(1 << source);
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	}
}

static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
{
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
}

static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->need_update_smu7_dpm_table = 0;
	return 0;
}

static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);

			WREG32_SMC(LCAC_MC0_CNTL, 0x05);
			WREG32_SMC(LCAC_MC1_CNTL, 0x05);
			WREG32_SMC(LCAC_CPL_CNTL, 0x100005);

			udelay(10);

			WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

static int ci_start_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(rdev, true);
	if (ret)
		return ret;

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_stop_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp &= ~GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	ret = ci_enable_sclk_mclk_dpm(rdev, false);
	if (ret)
		return ret;

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}

static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);

	if (enable)
		tmp &= ~SCLK_PWRMGT_OFF;
	else
		tmp |= SCLK_PWRMGT_OFF;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
}

#if 0
static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

	ci_set_power_limit(rdev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
		else
			ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
#endif

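/*
 * SMC mailbox protocol: write the message ID to SMC_MESSAGE_0 and
 * poll SMC_RESP_0 until it goes non-zero (the result code).  A
 * parameter, when used, is passed and returned in SMC_MSG_ARG_0.
 */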
1621 static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
1622 {
1623 	u32 tmp;
1624 	int i;
1625 
1626 	if (!ci_is_smc_running(rdev))
1627 		return PPSMC_Result_Failed;
1628 
1629 	WREG32(SMC_MESSAGE_0, msg);
1630 
1631 	for (i = 0; i < rdev->usec_timeout; i++) {
1632 		tmp = RREG32(SMC_RESP_0);
1633 		if (tmp != 0)
1634 			break;
1635 		udelay(1);
1636 	}
1637 	tmp = RREG32(SMC_RESP_0);
1638 
1639 	return (PPSMC_Result)tmp;
1640 }
1641 
1642 static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
1643 						      PPSMC_Msg msg, u32 parameter)
1644 {
1645 	WREG32(SMC_MSG_ARG_0, parameter);
1646 	return ci_send_msg_to_smc(rdev, msg);
1647 }
1648 
1649 static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
1650 							PPSMC_Msg msg, u32 *parameter)
1651 {
1652 	PPSMC_Result smc_result;
1653 
1654 	smc_result = ci_send_msg_to_smc(rdev, msg);
1655 
1656 	if ((smc_result == PPSMC_Result_OK) && parameter)
1657 		*parameter = RREG32(SMC_MSG_ARG_0);
1658 
1659 	return smc_result;
1660 }
1661 
1662 static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
1663 {
1664 	struct ci_power_info *pi = ci_get_pi(rdev);
1665 
1666 	if (!pi->sclk_dpm_key_disabled) {
1667 		PPSMC_Result smc_result =
1668 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1669 		if (smc_result != PPSMC_Result_OK)
1670 			return -EINVAL;
1671 	}
1672 
1673 	return 0;
1674 }
1675 
1676 static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
1677 {
1678 	struct ci_power_info *pi = ci_get_pi(rdev);
1679 
1680 	if (!pi->mclk_dpm_key_disabled) {
1681 		PPSMC_Result smc_result =
1682 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1683 		if (smc_result != PPSMC_Result_OK)
1684 			return -EINVAL;
1685 	}
1686 
1687 	return 0;
1688 }
1689 
1690 static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
1691 {
1692 	struct ci_power_info *pi = ci_get_pi(rdev);
1693 
1694 	if (!pi->pcie_dpm_key_disabled) {
1695 		PPSMC_Result smc_result =
1696 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1697 		if (smc_result != PPSMC_Result_OK)
1698 			return -EINVAL;
1699 	}
1700 
1701 	return 0;
1702 }
1703 
1704 static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
1705 {
1706 	struct ci_power_info *pi = ci_get_pi(rdev);
1707 
1708 	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1709 		PPSMC_Result smc_result =
1710 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
1711 		if (smc_result != PPSMC_Result_OK)
1712 			return -EINVAL;
1713 	}
1714 
1715 	return 0;
1716 }
1717 
1718 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
1719 				       u32 target_tdp)
1720 {
1721 	PPSMC_Result smc_result =
1722 		ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1723 	if (smc_result != PPSMC_Result_OK)
1724 		return -EINVAL;
1725 	return 0;
1726 }
1727 
1728 #if 0
1729 static int ci_set_boot_state(struct radeon_device *rdev)
1730 {
1731 	return ci_enable_sclk_mclk_dpm(rdev, false);
1732 }
1733 #endif
1734 
1735 static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
1736 {
1737 	u32 sclk_freq;
1738 	PPSMC_Result smc_result =
1739 		ci_send_msg_to_smc_return_parameter(rdev,
1740 						    PPSMC_MSG_API_GetSclkFrequency,
1741 						    &sclk_freq);
1742 	if (smc_result != PPSMC_Result_OK)
1743 		sclk_freq = 0;
1744 
1745 	return sclk_freq;
1746 }
1747 
1748 static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
1749 {
1750 	u32 mclk_freq;
1751 	PPSMC_Result smc_result =
1752 		ci_send_msg_to_smc_return_parameter(rdev,
1753 						    PPSMC_MSG_API_GetMclkFrequency,
1754 						    &mclk_freq);
1755 	if (smc_result != PPSMC_Result_OK)
1756 		mclk_freq = 0;
1757 
1758 	return mclk_freq;
1759 }
1760 
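/*
 * Bring the SMC up: program the boot jump vector, ungate the SMC clock
 * and release reset, then wait for the firmware to report that its
 * interrupt handling is running via the INTERRUPTS_ENABLED flag.
 */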
1761 static void ci_dpm_start_smc(struct radeon_device *rdev)
1762 {
1763 	int i;
1764 
1765 	ci_program_jump_on_start(rdev);
1766 	ci_start_smc_clock(rdev);
1767 	ci_start_smc(rdev);
1768 	for (i = 0; i < rdev->usec_timeout; i++) {
1769 		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
1770 			break;
1771 	}
1772 }
1773 
1774 static void ci_dpm_stop_smc(struct radeon_device *rdev)
1775 {
1776 	ci_reset_smc(rdev);
1777 	ci_stop_smc_clock(rdev);
1778 }
1779 
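/*
 * Pull the SMC SRAM offsets of the DPM, soft-register, MC register, fan
 * and MC arbitration tables out of the firmware header so that later
 * uploads know where to write.
 */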
1780 static int ci_process_firmware_header(struct radeon_device *rdev)
1781 {
1782 	struct ci_power_info *pi = ci_get_pi(rdev);
1783 	u32 tmp;
1784 	int ret;
1785 
1786 	ret = ci_read_smc_sram_dword(rdev,
1787 				     SMU7_FIRMWARE_HEADER_LOCATION +
1788 				     offsetof(SMU7_Firmware_Header, DpmTable),
1789 				     &tmp, pi->sram_end);
1790 	if (ret)
1791 		return ret;
1792 
1793 	pi->dpm_table_start = tmp;
1794 
1795 	ret = ci_read_smc_sram_dword(rdev,
1796 				     SMU7_FIRMWARE_HEADER_LOCATION +
1797 				     offsetof(SMU7_Firmware_Header, SoftRegisters),
1798 				     &tmp, pi->sram_end);
1799 	if (ret)
1800 		return ret;
1801 
1802 	pi->soft_regs_start = tmp;
1803 
1804 	ret = ci_read_smc_sram_dword(rdev,
1805 				     SMU7_FIRMWARE_HEADER_LOCATION +
1806 				     offsetof(SMU7_Firmware_Header, mcRegisterTable),
1807 				     &tmp, pi->sram_end);
1808 	if (ret)
1809 		return ret;
1810 
1811 	pi->mc_reg_table_start = tmp;
1812 
1813 	ret = ci_read_smc_sram_dword(rdev,
1814 				     SMU7_FIRMWARE_HEADER_LOCATION +
1815 				     offsetof(SMU7_Firmware_Header, FanTable),
1816 				     &tmp, pi->sram_end);
1817 	if (ret)
1818 		return ret;
1819 
1820 	pi->fan_table_start = tmp;
1821 
1822 	ret = ci_read_smc_sram_dword(rdev,
1823 				     SMU7_FIRMWARE_HEADER_LOCATION +
1824 				     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1825 				     &tmp, pi->sram_end);
1826 	if (ret)
1827 		return ret;
1828 
1829 	pi->arb_table_start = tmp;
1830 
1831 	return 0;
1832 }
1833 
1834 static void ci_read_clock_registers(struct radeon_device *rdev)
1835 {
1836 	struct ci_power_info *pi = ci_get_pi(rdev);
1837 
1838 	pi->clock_registers.cg_spll_func_cntl =
1839 		RREG32_SMC(CG_SPLL_FUNC_CNTL);
1840 	pi->clock_registers.cg_spll_func_cntl_2 =
1841 		RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
1842 	pi->clock_registers.cg_spll_func_cntl_3 =
1843 		RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
1844 	pi->clock_registers.cg_spll_func_cntl_4 =
1845 		RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
1846 	pi->clock_registers.cg_spll_spread_spectrum =
1847 		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
1848 	pi->clock_registers.cg_spll_spread_spectrum_2 =
1849 		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
1850 	pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
1851 	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
1852 	pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
1853 	pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
1854 	pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
1855 	pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
1856 	pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
1857 	pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
1858 	pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
1859 }
1860 
1861 static void ci_init_sclk_t(struct radeon_device *rdev)
1862 {
1863 	struct ci_power_info *pi = ci_get_pi(rdev);
1864 
1865 	pi->low_sclk_interrupt_t = 0;
1866 }
1867 
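/*
 * THERMAL_PROTECTION_DIS is a disable bit, so the polarity is inverted:
 * enabling thermal protection means clearing the bit.
 */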
1868 static void ci_enable_thermal_protection(struct radeon_device *rdev,
1869 					 bool enable)
1870 {
1871 	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1872 
1873 	if (enable)
1874 		tmp &= ~THERMAL_PROTECTION_DIS;
1875 	else
1876 		tmp |= THERMAL_PROTECTION_DIS;
1877 	WREG32_SMC(GENERAL_PWRMGT, tmp);
1878 }
1879 
1880 static void ci_enable_acpi_power_management(struct radeon_device *rdev)
1881 {
1882 	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1883 
1884 	tmp |= STATIC_PM_EN;
1885 
1886 	WREG32_SMC(GENERAL_PWRMGT, tmp);
1887 }
1888 
1889 #if 0
1890 static int ci_enter_ulp_state(struct radeon_device *rdev)
1891 {
1893 	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
1894 
1895 	udelay(25000);
1896 
1897 	return 0;
1898 }
1899 
1900 static int ci_exit_ulp_state(struct radeon_device *rdev)
1901 {
1902 	int i;
1903 
1904 	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
1905 
1906 	udelay(7000);
1907 
1908 	for (i = 0; i < rdev->usec_timeout; i++) {
1909 		if (RREG32(SMC_RESP_0) == 1)
1910 			break;
1911 		udelay(1000);
1912 	}
1913 
1914 	return 0;
1915 }
1916 #endif
1917 
1918 static int ci_notify_smc_display_change(struct radeon_device *rdev,
1919 					bool has_display)
1920 {
1921 	PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
1922 
1923 	return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ?  0 : -EINVAL;
1924 }
1925 
1926 static int ci_enable_ds_master_switch(struct radeon_device *rdev,
1927 				      bool enable)
1928 {
1929 	struct ci_power_info *pi = ci_get_pi(rdev);
1930 
1931 	if (enable) {
1932 		if (pi->caps_sclk_ds) {
1933 			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
1934 				return -EINVAL;
1935 		} else {
1936 			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1937 				return -EINVAL;
1938 		}
1939 	} else {
1940 		if (pi->caps_sclk_ds) {
1941 			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1942 				return -EINVAL;
1943 		}
1944 	}
1945 
1946 	return 0;
1947 }
1948 
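/*
 * Program the display gap heuristics.  With rdev->clock.spll.reference_freq
 * in 10 kHz units, ref_clock / 100 is the number of reference-clock ticks
 * per microsecond, so CG_DISPLAY_GAP_CNTL2 ends up holding the pre-vblank
 * interval converted from microseconds to clock ticks.
 */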
1949 static void ci_program_display_gap(struct radeon_device *rdev)
1950 {
1951 	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
1952 	u32 pre_vbi_time_in_us;
1953 	u32 frame_time_in_us;
1954 	u32 ref_clock = rdev->clock.spll.reference_freq;
1955 	u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
1956 	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
1957 
1958 	tmp &= ~DISP_GAP_MASK;
1959 	if (rdev->pm.dpm.new_active_crtc_count > 0)
1960 		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
1961 	else
1962 		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
1963 	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
1964 
1965 	if (refresh_rate == 0)
1966 		refresh_rate = 60;
1967 	if (vblank_time == 0xffffffff)
1968 		vblank_time = 500;
1969 	frame_time_in_us = 1000000 / refresh_rate;
1970 	pre_vbi_time_in_us =
1971 		frame_time_in_us - 200 - vblank_time;
1972 	tmp = pre_vbi_time_in_us * (ref_clock / 100);
1973 
1974 	WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
1975 	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
1976 	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
1977 
1979 	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
1981 }
1982 
1983 static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
1984 {
1985 	struct ci_power_info *pi = ci_get_pi(rdev);
1986 	u32 tmp;
1987 
1988 	if (enable) {
1989 		if (pi->caps_sclk_ss_support) {
1990 			tmp = RREG32_SMC(GENERAL_PWRMGT);
1991 			tmp |= DYN_SPREAD_SPECTRUM_EN;
1992 			WREG32_SMC(GENERAL_PWRMGT, tmp);
1993 		}
1994 	} else {
1995 		tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
1996 		tmp &= ~SSEN;
1997 		WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);
1998 
1999 		tmp = RREG32_SMC(GENERAL_PWRMGT);
2000 		tmp &= ~DYN_SPREAD_SPECTRUM_EN;
2001 		WREG32_SMC(GENERAL_PWRMGT, tmp);
2002 	}
2003 }
2004 
2005 static void ci_program_sstp(struct radeon_device *rdev)
2006 {
2007 	WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
2008 }
2009 
2010 static void ci_enable_display_gap(struct radeon_device *rdev)
2011 {
2012 	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
2013 
2014 	tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
2015 	tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
2016 		DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
2017 
2018 	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
2019 }
2020 
2021 static void ci_program_vc(struct radeon_device *rdev)
2022 {
2023 	u32 tmp;
2024 
2025 	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
2026 	tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
2027 	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
2028 
2029 	WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
2030 	WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
2031 	WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
2032 	WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
2033 	WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
2034 	WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
2035 	WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
2036 	WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
2037 }
2038 
2039 static void ci_clear_vc(struct radeon_device *rdev)
2040 {
2041 	u32 tmp;
2042 
2043 	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
2044 	tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
2045 	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
2046 
2047 	WREG32_SMC(CG_FTV_0, 0);
2048 	WREG32_SMC(CG_FTV_1, 0);
2049 	WREG32_SMC(CG_FTV_2, 0);
2050 	WREG32_SMC(CG_FTV_3, 0);
2051 	WREG32_SMC(CG_FTV_4, 0);
2052 	WREG32_SMC(CG_FTV_5, 0);
2053 	WREG32_SMC(CG_FTV_6, 0);
2054 	WREG32_SMC(CG_FTV_7, 0);
2055 }
2056 
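/*
 * Wait for the boot ROM to finish (BOOT_SEQ_DONE), then halt the SMC by
 * stopping its clock and asserting reset before copying the ucode image
 * into SMC SRAM.
 */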
2057 static int ci_upload_firmware(struct radeon_device *rdev)
2058 {
2059 	struct ci_power_info *pi = ci_get_pi(rdev);
2060 	int i, ret;
2061 
2062 	for (i = 0; i < rdev->usec_timeout; i++) {
2063 		if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
2064 			break;
2065 	}
2066 	WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);
2067 
2068 	ci_stop_smc_clock(rdev);
2069 	ci_reset_smc(rdev);
2070 
2071 	ret = ci_load_smc_ucode(rdev, pi->sram_end);
2072 
2073 	return ret;
2075 }
2076 
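/*
 * For SVI2 voltage control there is no GPIO lookup table, so build a flat
 * voltage table straight from a clock/voltage dependency table; the SMIO
 * mask and phase delay are meaningless here and are left at zero.
 */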
2077 static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
2078 				     struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
2079 				     struct atom_voltage_table *voltage_table)
2080 {
2081 	u32 i;
2082 
2083 	if (voltage_dependency_table == NULL)
2084 		return -EINVAL;
2085 
2086 	voltage_table->mask_low = 0;
2087 	voltage_table->phase_delay = 0;
2088 
2089 	voltage_table->count = voltage_dependency_table->count;
2090 	for (i = 0; i < voltage_table->count; i++) {
2091 		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2092 		voltage_table->entries[i].smio_low = 0;
2093 	}
2094 
2095 	return 0;
2096 }
2097 
2098 static int ci_construct_voltage_tables(struct radeon_device *rdev)
2099 {
2100 	struct ci_power_info *pi = ci_get_pi(rdev);
2101 	int ret;
2102 
2103 	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2104 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
2105 						    VOLTAGE_OBJ_GPIO_LUT,
2106 						    &pi->vddc_voltage_table);
2107 		if (ret)
2108 			return ret;
2109 	} else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2110 		ret = ci_get_svi2_voltage_table(rdev,
2111 						&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2112 						&pi->vddc_voltage_table);
2113 		if (ret)
2114 			return ret;
2115 	}
2116 
2117 	if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2118 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
2119 							 &pi->vddc_voltage_table);
2120 
2121 	if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2122 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
2123 						    VOLTAGE_OBJ_GPIO_LUT,
2124 						    &pi->vddci_voltage_table);
2125 		if (ret)
2126 			return ret;
2127 	} else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2128 		ret = ci_get_svi2_voltage_table(rdev,
2129 						&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2130 						&pi->vddci_voltage_table);
2131 		if (ret)
2132 			return ret;
2133 	}
2134 
2135 	if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2136 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
2137 							 &pi->vddci_voltage_table);
2138 
2139 	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2140 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
2141 						    VOLTAGE_OBJ_GPIO_LUT,
2142 						    &pi->mvdd_voltage_table);
2143 		if (ret)
2144 			return ret;
2145 	} else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2146 		ret = ci_get_svi2_voltage_table(rdev,
2147 						&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2148 						&pi->mvdd_voltage_table);
2149 		if (ret)
2150 			return ret;
2151 	}
2152 
2153 	if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2154 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
2155 							 &pi->mvdd_voltage_table);
2156 
2157 	return 0;
2158 }
2159 
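/*
 * Convert one ATOM voltage table entry into SMC form.  Voltages are in mV
 * and are scaled by VOLTAGE_SCALE (i.e. expressed in 0.25 mV steps); the
 * SMC is big-endian, hence the cpu_to_be16() swaps.  If no leakage data
 * is available, the nominal voltage doubles as both Sidd values.
 */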
2160 static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
2161 					  struct atom_voltage_table_entry *voltage_table,
2162 					  SMU7_Discrete_VoltageLevel *smc_voltage_table)
2163 {
2164 	int ret;
2165 
2166 	ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
2167 					    &smc_voltage_table->StdVoltageHiSidd,
2168 					    &smc_voltage_table->StdVoltageLoSidd);
2169 
2170 	if (ret) {
2171 		smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2172 		smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2173 	}
2174 
2175 	smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2176 	smc_voltage_table->StdVoltageHiSidd =
2177 		cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2178 	smc_voltage_table->StdVoltageLoSidd =
2179 		cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2180 }
2181 
2182 static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
2183 				      SMU7_Discrete_DpmTable *table)
2184 {
2185 	struct ci_power_info *pi = ci_get_pi(rdev);
2186 	unsigned int count;
2187 
2188 	table->VddcLevelCount = pi->vddc_voltage_table.count;
2189 	for (count = 0; count < table->VddcLevelCount; count++) {
2190 		ci_populate_smc_voltage_table(rdev,
2191 					      &pi->vddc_voltage_table.entries[count],
2192 					      &table->VddcLevel[count]);
2193 
2194 		if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2195 			table->VddcLevel[count].Smio |=
2196 				pi->vddc_voltage_table.entries[count].smio_low;
2197 		else
2198 			table->VddcLevel[count].Smio = 0;
2199 	}
2200 	table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2201 
2202 	return 0;
2203 }
2204 
2205 static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
2206 				       SMU7_Discrete_DpmTable *table)
2207 {
2208 	unsigned int count;
2209 	struct ci_power_info *pi = ci_get_pi(rdev);
2210 
2211 	table->VddciLevelCount = pi->vddci_voltage_table.count;
2212 	for (count = 0; count < table->VddciLevelCount; count++) {
2213 		ci_populate_smc_voltage_table(rdev,
2214 					      &pi->vddci_voltage_table.entries[count],
2215 					      &table->VddciLevel[count]);
2216 
2217 		if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2218 			table->VddciLevel[count].Smio |=
2219 				pi->vddci_voltage_table.entries[count].smio_low;
2220 		else
2221 			table->VddciLevel[count].Smio = 0;
2222 	}
2223 	table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2224 
2225 	return 0;
2226 }
2227 
2228 static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
2229 				      SMU7_Discrete_DpmTable *table)
2230 {
2231 	struct ci_power_info *pi = ci_get_pi(rdev);
2232 	unsigned int count;
2233 
2234 	table->MvddLevelCount = pi->mvdd_voltage_table.count;
2235 	for (count = 0; count < table->MvddLevelCount; count++) {
2236 		ci_populate_smc_voltage_table(rdev,
2237 					      &pi->mvdd_voltage_table.entries[count],
2238 					      &table->MvddLevel[count]);
2239 
2240 		if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2241 			table->MvddLevel[count].Smio |=
2242 				pi->mvdd_voltage_table.entries[count].smio_low;
2243 		else
2244 			table->MvddLevel[count].Smio = 0;
2245 	}
2246 	table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2247 
2248 	return 0;
2249 }
2250 
2251 static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
2252 					  SMU7_Discrete_DpmTable *table)
2253 {
2254 	int ret;
2255 
2256 	ret = ci_populate_smc_vddc_table(rdev, table);
2257 	if (ret)
2258 		return ret;
2259 
2260 	ret = ci_populate_smc_vddci_table(rdev, table);
2261 	if (ret)
2262 		return ret;
2263 
2264 	ret = ci_populate_smc_mvdd_table(rdev, table);
2265 	if (ret)
2266 		return ret;
2267 
2268 	return 0;
2269 }
2270 
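/*
 * Look up the MVDD level for @mclk: the first dependency-table entry whose
 * clock is >= @mclk wins.  Returns -EINVAL when MVDD is not controlled or
 * no entry matches, letting callers fall back to a zero MVDD level.
 */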
2271 static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
2272 				  SMU7_Discrete_VoltageLevel *voltage)
2273 {
2274 	struct ci_power_info *pi = ci_get_pi(rdev);
2275 	u32 i = 0;
2276 
2277 	if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2278 		for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2279 			if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2280 				voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2281 				break;
2282 			}
2283 		}
2284 
2285 		if (i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2286 			return 0;
2287 	}
2288 
2289 	return -EINVAL;
2290 }
2291 
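/*
 * Derive the leakage-compensated "standard" high/low Sidd voltages for a
 * VDDC value from the CAC leakage table: first look for an exact match in
 * the sclk/vddc dependency table, then fall back to the closest entry with
 * a voltage >= the requested one.  Defaults to the nominal voltage.
 */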
2292 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
2293 					 struct atom_voltage_table_entry *voltage_table,
2294 					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2295 {
2296 	u16 v_index, idx;
2297 	bool voltage_found = false;
2298 	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2299 	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2300 
2301 	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2302 		return -EINVAL;
2303 
2304 	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2305 		for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2306 			if (voltage_table->value ==
2307 			    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2308 				voltage_found = true;
2309 				if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
2310 					idx = v_index;
2311 				else
2312 					idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2313 				*std_voltage_lo_sidd =
2314 					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2315 				*std_voltage_hi_sidd =
2316 					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2317 				break;
2318 			}
2319 		}
2320 
2321 		if (!voltage_found) {
2322 			for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2323 				if (voltage_table->value <=
2324 				    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2325 					voltage_found = true;
2326 					if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
2327 						idx = v_index;
2328 					else
2329 						idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2330 					*std_voltage_lo_sidd =
2331 						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2332 					*std_voltage_hi_sidd =
2333 						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2334 					break;
2335 				}
2336 			}
2337 		}
2338 	}
2339 
2340 	return 0;
2341 }
2342 
2343 static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
2344 						  const struct radeon_phase_shedding_limits_table *limits,
2345 						  u32 sclk,
2346 						  u32 *phase_shedding)
2347 {
2348 	unsigned int i;
2349 
2350 	*phase_shedding = 1;
2351 
2352 	for (i = 0; i < limits->count; i++) {
2353 		if (sclk < limits->entries[i].sclk) {
2354 			*phase_shedding = i;
2355 			break;
2356 		}
2357 	}
2358 }
2359 
2360 static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
2361 						  const struct radeon_phase_shedding_limits_table *limits,
2362 						  u32 mclk,
2363 						  u32 *phase_shedding)
2364 {
2365 	unsigned int i;
2366 
2367 	*phase_shedding = 1;
2368 
2369 	for (i = 0; i < limits->count; i++) {
2370 		if (mclk < limits->entries[i].mclk) {
2371 			*phase_shedding = i;
2372 			break;
2373 		}
2374 	}
2375 }
2376 
2377 static int ci_init_arb_table_index(struct radeon_device *rdev)
2378 {
2379 	struct ci_power_info *pi = ci_get_pi(rdev);
2380 	u32 tmp;
2381 	int ret;
2382 
2383 	ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
2384 				     &tmp, pi->sram_end);
2385 	if (ret)
2386 		return ret;
2387 
2388 	tmp &= 0x00FFFFFF;
2389 	tmp |= MC_CG_ARB_FREQ_F1 << 24;
2390 
2391 	return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
2392 				       tmp, pi->sram_end);
2393 }
2394 
2395 static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
2396 					 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
2397 					 u32 clock, u32 *voltage)
2398 {
2399 	u32 i = 0;
2400 
2401 	if (allowed_clock_voltage_table->count == 0)
2402 		return -EINVAL;
2403 
2404 	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2405 		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2406 			*voltage = allowed_clock_voltage_table->entries[i].v;
2407 			return 0;
2408 		}
2409 	}
2410 
2411 	*voltage = allowed_clock_voltage_table->entries[i-1].v;
2412 
2413 	return 0;
2414 }
2415 
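/*
 * Pick the deep-sleep divider ID: the largest divider (sclk >> id) that
 * still keeps the engine clock at or above the minimum.  For example,
 * assuming CISLAND_MINIMUM_ENGINE_CLOCK is 2500 (25 MHz in 10 kHz units),
 * sclk = 30000 yields id 3 (30000 >> 3 = 3750 >= 2500, while
 * 30000 >> 4 = 1875 is too low).
 */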
2416 static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
2417 					     u32 sclk, u32 min_sclk_in_sr)
2418 {
2419 	u32 i;
2420 	u32 tmp;
2421 	u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
2422 		min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
2423 
2424 	if (sclk < min)
2425 		return 0;
2426 
2427 	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
2428 		tmp = sclk / (1 << i);
2429 		if (tmp >= min || i == 0)
2430 			break;
2431 	}
2432 
2433 	return (u8)i;
2434 }
2435 
2436 static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
2437 {
2438 	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2439 }
2440 
2441 static int ci_reset_to_default(struct radeon_device *rdev)
2442 {
2443 	return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2444 		0 : -EINVAL;
2445 }
2446 
2447 static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
2448 {
2449 	u32 tmp;
2450 
2451 	tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
2452 
2453 	if (tmp == MC_CG_ARB_FREQ_F0)
2454 		return 0;
2455 
2456 	return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
2457 }
2458 
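/*
 * Hawaii MC arbiter workaround: on device IDs 0x67B0/0x67B1, when
 * (MC_SEQ_MISC0 & 0xf00) reads 0x300, patch the 0x00ff0000 field of
 * DRAM_TIMING2 for memory clocks between 1.0-1.25 GHz and 1.25-1.375 GHz
 * (the clock arguments are in 10 kHz units).
 */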
2459 static void ci_register_patching_mc_arb(struct radeon_device *rdev,
2460 					const u32 engine_clock,
2461 					const u32 memory_clock,
2462 					u32 *dram_timing2)
2463 {
2464 	bool patch;
2465 	u32 tmp, tmp2;
2466 
2467 	tmp = RREG32(MC_SEQ_MISC0);
2468 	patch = ((tmp & 0x0000f00) == 0x300);
2469 
2470 	if (patch &&
2471 	    ((rdev->pdev->device == 0x67B0) ||
2472 	     (rdev->pdev->device == 0x67B1))) {
2473 		if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2474 			tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2475 			*dram_timing2 &= ~0x00ff0000;
2476 			*dram_timing2 |= tmp2 << 16;
2477 		} else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2478 			tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2479 			*dram_timing2 &= ~0x00ff0000;
2480 			*dram_timing2 |= tmp2 << 16;
2481 		}
2482 	}
2483 }
2484 
2486 static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
2487 						u32 sclk,
2488 						u32 mclk,
2489 						SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2490 {
2491 	u32 dram_timing;
2492 	u32 dram_timing2;
2493 	u32 burst_time;
2494 
2495 	radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
2496 
2497 	dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
2498 	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
2499 	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
2500 
2501 	ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2);
2502 
2503 	arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2504 	arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2505 	arb_regs->McArbBurstTime = (u8)burst_time;
2506 
2507 	return 0;
2508 }
2509 
2510 static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
2511 {
2512 	struct ci_power_info *pi = ci_get_pi(rdev);
2513 	SMU7_Discrete_MCArbDramTimingTable arb_regs;
2514 	u32 i, j;
2515 	int ret = 0;
2516 
2517 	memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2518 
2519 	for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2520 		for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2521 			ret = ci_populate_memory_timing_parameters(rdev,
2522 								   pi->dpm_table.sclk_table.dpm_levels[i].value,
2523 								   pi->dpm_table.mclk_table.dpm_levels[j].value,
2524 								   &arb_regs.entries[i][j]);
2525 			if (ret)
2526 				break;
2527 		}
2528 	}
2529 
2530 	if (ret == 0)
2531 		ret = ci_copy_bytes_to_smc(rdev,
2532 					   pi->arb_table_start,
2533 					   (u8 *)&arb_regs,
2534 					   sizeof(SMU7_Discrete_MCArbDramTimingTable),
2535 					   pi->sram_end);
2536 
2537 	return ret;
2538 }
2539 
2540 static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2541 {
2542 	struct ci_power_info *pi = ci_get_pi(rdev);
2543 
2544 	if (pi->need_update_smu7_dpm_table == 0)
2545 		return 0;
2546 
2547 	return ci_do_program_memory_timing_parameters(rdev);
2548 }
2549 
2550 static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2551 					  struct radeon_ps *radeon_boot_state)
2552 {
2553 	struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2554 	struct ci_power_info *pi = ci_get_pi(rdev);
2555 	u32 level = 0;
2556 
2557 	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2558 		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2559 		    boot_state->performance_levels[0].sclk) {
2560 			pi->smc_state_table.GraphicsBootLevel = level;
2561 			break;
2562 		}
2563 	}
2564 
2565 	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2566 		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2567 		    boot_state->performance_levels[0].mclk) {
2568 			pi->smc_state_table.MemoryBootLevel = level;
2569 			break;
2570 		}
2571 	}
2572 }
2573 
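/*
 * Collapse a DPM table into the enabled-level bitmask the SMC expects,
 * bit i standing for level i.  For example, a four-level table with
 * levels 0, 1 and 3 enabled yields 0xb (binary 1011).
 */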
2574 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2575 {
2576 	u32 i;
2577 	u32 mask_value = 0;
2578 
2579 	for (i = dpm_table->count; i > 0; i--) {
2580 		mask_value = mask_value << 1;
2581 		if (dpm_table->dpm_levels[i-1].enabled)
2582 			mask_value |= 0x1;
2583 		else
2584 			mask_value &= 0xFFFFFFFE;
2585 	}
2586 
2587 	return mask_value;
2588 }
2589 
2590 static void ci_populate_smc_link_level(struct radeon_device *rdev,
2591 				       SMU7_Discrete_DpmTable *table)
2592 {
2593 	struct ci_power_info *pi = ci_get_pi(rdev);
2594 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
2595 	u32 i;
2596 
2597 	for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2598 		table->LinkLevel[i].PcieGenSpeed =
2599 			(u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2600 		table->LinkLevel[i].PcieLaneCount =
2601 			r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2602 		table->LinkLevel[i].EnabledForActivity = 1;
2603 		table->LinkLevel[i].DownT = cpu_to_be32(5);
2604 		table->LinkLevel[i].UpT = cpu_to_be32(30);
2605 	}
2606 
2607 	pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2608 	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2609 		ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2610 }
2611 
2612 static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
2613 				     SMU7_Discrete_DpmTable *table)
2614 {
2615 	u32 count;
2616 	struct atom_clock_dividers dividers;
2617 	int ret = -EINVAL;
2618 
2619 	table->UvdLevelCount =
2620 		rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2621 
2622 	for (count = 0; count < table->UvdLevelCount; count++) {
2623 		table->UvdLevel[count].VclkFrequency =
2624 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2625 		table->UvdLevel[count].DclkFrequency =
2626 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2627 		table->UvdLevel[count].MinVddc =
2628 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2629 		table->UvdLevel[count].MinVddcPhases = 1;
2630 
2631 		ret = radeon_atom_get_clock_dividers(rdev,
2632 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2633 						     table->UvdLevel[count].VclkFrequency, false, &dividers);
2634 		if (ret)
2635 			return ret;
2636 
2637 		table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2638 
2639 		ret = radeon_atom_get_clock_dividers(rdev,
2640 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2641 						     table->UvdLevel[count].DclkFrequency, false, &dividers);
2642 		if (ret)
2643 			return ret;
2644 
2645 		table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2646 
2647 		table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2648 		table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2649 		table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2650 	}
2651 
2652 	return ret;
2653 }
2654 
2655 static int ci_populate_smc_vce_level(struct radeon_device *rdev,
2656 				     SMU7_Discrete_DpmTable *table)
2657 {
2658 	u32 count;
2659 	struct atom_clock_dividers dividers;
2660 	int ret = -EINVAL;
2661 
2662 	table->VceLevelCount =
2663 		rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2664 
2665 	for (count = 0; count < table->VceLevelCount; count++) {
2666 		table->VceLevel[count].Frequency =
2667 			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2668 		table->VceLevel[count].MinVoltage =
2669 			(u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2670 		table->VceLevel[count].MinPhases = 1;
2671 
2672 		ret = radeon_atom_get_clock_dividers(rdev,
2673 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2674 						     table->VceLevel[count].Frequency, false, &dividers);
2675 		if (ret)
2676 			return ret;
2677 
2678 		table->VceLevel[count].Divider = (u8)dividers.post_divider;
2679 
2680 		table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2681 		table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2682 	}
2683 
2684 	return ret;
2686 }
2687 
2688 static int ci_populate_smc_acp_level(struct radeon_device *rdev,
2689 				     SMU7_Discrete_DpmTable *table)
2690 {
2691 	u32 count;
2692 	struct atom_clock_dividers dividers;
2693 	int ret = -EINVAL;
2694 
2695 	table->AcpLevelCount = (u8)
2696 		(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2697 
2698 	for (count = 0; count < table->AcpLevelCount; count++) {
2699 		table->AcpLevel[count].Frequency =
2700 			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2701 		table->AcpLevel[count].MinVoltage =
2702 			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2703 		table->AcpLevel[count].MinPhases = 1;
2704 
2705 		ret = radeon_atom_get_clock_dividers(rdev,
2706 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2707 						     table->AcpLevel[count].Frequency, false, &dividers);
2708 		if (ret)
2709 			return ret;
2710 
2711 		table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2712 
2713 		table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2714 		table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2715 	}
2716 
2717 	return ret;
2718 }
2719 
2720 static int ci_populate_smc_samu_level(struct radeon_device *rdev,
2721 				      SMU7_Discrete_DpmTable *table)
2722 {
2723 	u32 count;
2724 	struct atom_clock_dividers dividers;
2725 	int ret = -EINVAL;
2726 
2727 	table->SamuLevelCount =
2728 		rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2729 
2730 	for (count = 0; count < table->SamuLevelCount; count++) {
2731 		table->SamuLevel[count].Frequency =
2732 			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2733 		table->SamuLevel[count].MinVoltage =
2734 			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2735 		table->SamuLevel[count].MinPhases = 1;
2736 
2737 		ret = radeon_atom_get_clock_dividers(rdev,
2738 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2739 						     table->SamuLevel[count].Frequency, false, &dividers);
2740 		if (ret)
2741 			return ret;
2742 
2743 		table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2744 
2745 		table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2746 		table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2747 	}
2748 
2749 	return ret;
2750 }
2751 
2752 static int ci_calculate_mclk_params(struct radeon_device *rdev,
2753 				    u32 memory_clock,
2754 				    SMU7_Discrete_MemoryLevel *mclk,
2755 				    bool strobe_mode,
2756 				    bool dll_state_on)
2757 {
2758 	struct ci_power_info *pi = ci_get_pi(rdev);
2759 	u32  dll_cntl = pi->clock_registers.dll_cntl;
2760 	u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2761 	u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2762 	u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2763 	u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2764 	u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2765 	u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2766 	u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
2767 	u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
2768 	struct atom_mpll_param mpll_param;
2769 	int ret;
2770 
2771 	ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
2772 	if (ret)
2773 		return ret;
2774 
2775 	mpll_func_cntl &= ~BWCTRL_MASK;
2776 	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
2777 
2778 	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
2779 	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
2780 		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
2781 
2782 	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
2783 	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
2784 
2785 	if (pi->mem_gddr5) {
2786 		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
2787 		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
2788 			YCLK_POST_DIV(mpll_param.post_div);
2789 	}
2790 
2791 	if (pi->caps_mclk_ss_support) {
2792 		struct radeon_atom_ss ss;
2793 		u32 freq_nom;
2794 		u32 tmp;
2795 		u32 reference_clock = rdev->clock.mpll.reference_freq;
2796 
2797 		if (mpll_param.qdr == 1)
2798 			freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
2799 		else
2800 			freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
2801 
2802 		tmp = (freq_nom / reference_clock);
2803 		tmp = tmp * tmp;
2804 		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2805 						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2806 			u32 clks = reference_clock * 5 / ss.rate;
2807 			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2808 
2809 			mpll_ss1 &= ~CLKV_MASK;
2810 			mpll_ss1 |= CLKV(clkv);
2811 
2812 			mpll_ss2 &= ~CLKS_MASK;
2813 			mpll_ss2 |= CLKS(clks);
2814 		}
2815 	}
2816 
2817 	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
2818 	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
2819 
2820 	if (dll_state_on)
2821 		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
2822 	else
2823 		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2824 
2825 	mclk->MclkFrequency = memory_clock;
2826 	mclk->MpllFuncCntl = mpll_func_cntl;
2827 	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2828 	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2829 	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2830 	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2831 	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2832 	mclk->DllCntl = dll_cntl;
2833 	mclk->MpllSs1 = mpll_ss1;
2834 	mclk->MpllSs2 = mpll_ss2;
2835 
2836 	return 0;
2837 }
2838 
2839 static int ci_populate_single_memory_level(struct radeon_device *rdev,
2840 					   u32 memory_clock,
2841 					   SMU7_Discrete_MemoryLevel *memory_level)
2842 {
2843 	struct ci_power_info *pi = ci_get_pi(rdev);
2844 	int ret;
2845 	bool dll_state_on;
2846 
2847 	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
2848 		ret = ci_get_dependency_volt_by_clk(rdev,
2849 						    &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2850 						    memory_clock, &memory_level->MinVddc);
2851 		if (ret)
2852 			return ret;
2853 	}
2854 
2855 	if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
2856 		ret = ci_get_dependency_volt_by_clk(rdev,
2857 						    &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2858 						    memory_clock, &memory_level->MinVddci);
2859 		if (ret)
2860 			return ret;
2861 	}
2862 
2863 	if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
2864 		ret = ci_get_dependency_volt_by_clk(rdev,
2865 						    &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2866 						    memory_clock, &memory_level->MinMvdd);
2867 		if (ret)
2868 			return ret;
2869 	}
2870 
2871 	memory_level->MinVddcPhases = 1;
2872 
2873 	if (pi->vddc_phase_shed_control)
2874 		ci_populate_phase_value_based_on_mclk(rdev,
2875 						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2876 						      memory_clock,
2877 						      &memory_level->MinVddcPhases);
2878 
2879 	memory_level->EnabledForThrottle = 1;
2880 	memory_level->UpH = 0;
2881 	memory_level->DownH = 100;
2882 	memory_level->VoltageDownH = 0;
2883 	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
2884 
2885 	memory_level->StutterEnable = false;
2886 	memory_level->StrobeEnable = false;
2887 	memory_level->EdcReadEnable = false;
2888 	memory_level->EdcWriteEnable = false;
2889 	memory_level->RttEnable = false;
2890 
2891 	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2892 
2893 	if (pi->mclk_stutter_mode_threshold &&
2894 	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
2895 	    !pi->uvd_enabled &&
2896 	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
2897 	    (rdev->pm.dpm.new_active_crtc_count <= 2))
2898 		memory_level->StutterEnable = true;
2899 
2900 	if (pi->mclk_strobe_mode_threshold &&
2901 	    (memory_clock <= pi->mclk_strobe_mode_threshold))
2902 		memory_level->StrobeEnable = true;
2903 
2904 	if (pi->mem_gddr5) {
2905 		memory_level->StrobeRatio =
2906 			si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
2907 		if (pi->mclk_edc_enable_threshold &&
2908 		    (memory_clock > pi->mclk_edc_enable_threshold))
2909 			memory_level->EdcReadEnable = true;
2910 
2911 		if (pi->mclk_edc_wr_enable_threshold &&
2912 		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
2913 			memory_level->EdcWriteEnable = true;
2914 
2915 		if (memory_level->StrobeEnable) {
2916 			if (si_get_mclk_frequency_ratio(memory_clock, true) >=
2917 			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
2918 				dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
2919 			else
2920 				dll_state_on = (RREG32(MC_SEQ_MISC6) >> 1) & 0x1;
2921 		} else {
2922 			dll_state_on = pi->dll_default_on;
2923 		}
2924 	} else {
2925 		memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
2926 		dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
2927 	}
2928 
2929 	ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
2930 	if (ret)
2931 		return ret;
2932 
2933 	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
2934 	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
2935 	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
2936 	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
2937 
2938 	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
2939 	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
2940 	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
2941 	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
2942 	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
2943 	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
2944 	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
2945 	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
2946 	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
2947 	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
2948 	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
2949 
2950 	return 0;
2951 }
2952 
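/*
 * Fill in the ACPI (lowest-power) level: the SPLL is powered off and held
 * in reset with the sclk mux moved off the SPLL (SCLK_MUX_SEL(4)), and the
 * memory DLLs are put into reset.  All values are byte-swapped for the
 * big-endian SMC.
 */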
2953 static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
2954 				      SMU7_Discrete_DpmTable *table)
2955 {
2956 	struct ci_power_info *pi = ci_get_pi(rdev);
2957 	struct atom_clock_dividers dividers;
2958 	SMU7_Discrete_VoltageLevel voltage_level;
2959 	u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
2960 	u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
2961 	u32 dll_cntl = pi->clock_registers.dll_cntl;
2962 	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2963 	int ret;
2964 
2965 	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
2966 
2967 	if (pi->acpi_vddc)
2968 		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
2969 	else
2970 		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
2971 
2972 	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
2973 
2974 	table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
2975 
2976 	ret = radeon_atom_get_clock_dividers(rdev,
2977 					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2978 					     table->ACPILevel.SclkFrequency, false, &dividers);
2979 	if (ret)
2980 		return ret;
2981 
2982 	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
2983 	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2984 	table->ACPILevel.DeepSleepDivId = 0;
2985 
2986 	spll_func_cntl &= ~SPLL_PWRON;
2987 	spll_func_cntl |= SPLL_RESET;
2988 
2989 	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
2990 	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
2991 
2992 	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
2993 	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
2994 	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
2995 	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
2996 	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
2997 	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2998 	table->ACPILevel.CcPwrDynRm = 0;
2999 	table->ACPILevel.CcPwrDynRm1 = 0;
3000 
3001 	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
3002 	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
3003 	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
3004 	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
3005 	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
3006 	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
3007 	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
3008 	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
3009 	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
3010 	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
3011 	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
3012 
3013 	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
3014 	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
3015 
3016 	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
3017 		if (pi->acpi_vddci)
3018 			table->MemoryACPILevel.MinVddci =
3019 				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
3020 		else
3021 			table->MemoryACPILevel.MinVddci =
3022 				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
3023 	}
3024 
3025 	if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
3026 		table->MemoryACPILevel.MinMvdd = 0;
3027 	else
3028 		table->MemoryACPILevel.MinMvdd =
3029 			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
3030 
3031 	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
3032 	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
3033 
3034 	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
3035 
3036 	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
3037 	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
3038 	table->MemoryACPILevel.MpllAdFuncCntl =
3039 		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
3040 	table->MemoryACPILevel.MpllDqFuncCntl =
3041 		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
3042 	table->MemoryACPILevel.MpllFuncCntl =
3043 		cpu_to_be32(pi->clock_registers.mpll_func_cntl);
3044 	table->MemoryACPILevel.MpllFuncCntl_1 =
3045 		cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
3046 	table->MemoryACPILevel.MpllFuncCntl_2 =
3047 		cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
3048 	table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
3049 	table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
3050 
3051 	table->MemoryACPILevel.EnabledForThrottle = 0;
3052 	table->MemoryACPILevel.EnabledForActivity = 0;
3053 	table->MemoryACPILevel.UpH = 0;
3054 	table->MemoryACPILevel.DownH = 100;
3055 	table->MemoryACPILevel.VoltageDownH = 0;
3056 	table->MemoryACPILevel.ActivityLevel =
3057 		cpu_to_be16((u16)pi->mclk_activity_target);
3058 
3059 	table->MemoryACPILevel.StutterEnable = false;
3060 	table->MemoryACPILevel.StrobeEnable = false;
3061 	table->MemoryACPILevel.EdcReadEnable = false;
3062 	table->MemoryACPILevel.EdcWriteEnable = false;
3063 	table->MemoryACPILevel.RttEnable = false;
3064 
3065 	return 0;
3066 }
3067 
3069 static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
3070 {
3071 	struct ci_power_info *pi = ci_get_pi(rdev);
3072 	struct ci_ulv_parm *ulv = &pi->ulv;
3073 
3074 	if (ulv->supported) {
3075 		if (enable)
3076 			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3077 				0 : -EINVAL;
3078 		else
3079 			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3080 				0 : -EINVAL;
3081 	}
3082 
3083 	return 0;
3084 }
3085 
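/*
 * Fill in the ULV state.  rdev->pm.dpm.backbias_response_time is repurposed
 * to carry the ULV target voltage; the state is programmed as an offset
 * below the lowest vddc_dependency_on_sclk entry, in mV for GPIO control
 * or converted to 6.25 mV VID steps (SCALE2/SCALE1) for SVI2.
 */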
3086 static int ci_populate_ulv_level(struct radeon_device *rdev,
3087 				 SMU7_Discrete_Ulv *state)
3088 {
3089 	struct ci_power_info *pi = ci_get_pi(rdev);
3090 	u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
3091 
3092 	state->CcPwrDynRm = 0;
3093 	state->CcPwrDynRm1 = 0;
3094 
3095 	if (ulv_voltage == 0) {
3096 		pi->ulv.supported = false;
3097 		return 0;
3098 	}
3099 
3100 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3101 		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3102 			state->VddcOffset = 0;
3103 		else
3104 			state->VddcOffset =
3105 				rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3106 	} else {
3107 		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3108 			state->VddcOffsetVid = 0;
3109 		else
3110 			state->VddcOffsetVid = (u8)
3111 				((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3112 				 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3113 	}
3114 	state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3115 
3116 	state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3117 	state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3118 	state->VddcOffset = cpu_to_be16(state->VddcOffset);
3119 
3120 	return 0;
3121 }
3122 
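/*
 * Work out the SPLL setup for an engine clock: atom supplies the
 * reference/feedback/post dividers, and when spread spectrum is enabled
 * the CLK_S/CLK_V pair is derived from the ss rate and percentage against
 * the fractional feedback divider.
 */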
3123 static int ci_calculate_sclk_params(struct radeon_device *rdev,
3124 				    u32 engine_clock,
3125 				    SMU7_Discrete_GraphicsLevel *sclk)
3126 {
3127 	struct ci_power_info *pi = ci_get_pi(rdev);
3128 	struct atom_clock_dividers dividers;
3129 	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
3130 	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
3131 	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
3132 	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3133 	u32 reference_clock = rdev->clock.spll.reference_freq;
3134 	u32 reference_divider;
3135 	u32 fbdiv;
3136 	int ret;
3137 
3138 	ret = radeon_atom_get_clock_dividers(rdev,
3139 					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3140 					     engine_clock, false, &dividers);
3141 	if (ret)
3142 		return ret;
3143 
3144 	reference_divider = 1 + dividers.ref_div;
3145 	fbdiv = dividers.fb_div & 0x3FFFFFF;
3146 
3147 	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
3148 	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
3149 	spll_func_cntl_3 |= SPLL_DITHEN;
3150 
3151 	if (pi->caps_sclk_ss_support) {
3152 		struct radeon_atom_ss ss;
3153 		u32 vco_freq = engine_clock * dividers.post_div;
3154 
3155 		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
3156 						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
3157 			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
3158 			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
3159 
3160 			cg_spll_spread_spectrum &= ~CLK_S_MASK;
3161 			cg_spll_spread_spectrum |= CLK_S(clk_s);
3162 			cg_spll_spread_spectrum |= SSEN;
3163 
3164 			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
3165 			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
3166 		}
3167 	}
3168 
3169 	sclk->SclkFrequency = engine_clock;
3170 	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
3171 	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
3172 	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
3173 	sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
3174 	sclk->SclkDid = (u8)dividers.post_divider;
3175 
3176 	return 0;
3177 }
3178 
3179 static int ci_populate_single_graphic_level(struct radeon_device *rdev,
3180 					    u32 engine_clock,
3181 					    u16 sclk_activity_level_t,
3182 					    SMU7_Discrete_GraphicsLevel *graphic_level)
3183 {
3184 	struct ci_power_info *pi = ci_get_pi(rdev);
3185 	int ret;
3186 
3187 	ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
3188 	if (ret)
3189 		return ret;
3190 
3191 	ret = ci_get_dependency_volt_by_clk(rdev,
3192 					    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3193 					    engine_clock, &graphic_level->MinVddc);
3194 	if (ret)
3195 		return ret;
3196 
3197 	graphic_level->SclkFrequency = engine_clock;
3198 
3199 	graphic_level->Flags = 0;
3200 	graphic_level->MinVddcPhases = 1;
3201 
3202 	if (pi->vddc_phase_shed_control)
3203 		ci_populate_phase_value_based_on_sclk(rdev,
3204 						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
3205 						      engine_clock,
3206 						      &graphic_level->MinVddcPhases);
3207 
3208 	graphic_level->ActivityLevel = sclk_activity_level_t;
3209 
3210 	graphic_level->CcPwrDynRm = 0;
3211 	graphic_level->CcPwrDynRm1 = 0;
3212 	graphic_level->EnabledForThrottle = 1;
3213 	graphic_level->UpH = 0;
3214 	graphic_level->DownH = 0;
3215 	graphic_level->VoltageDownH = 0;
3216 	graphic_level->PowerThrottle = 0;
3217 
3218 	if (pi->caps_sclk_ds)
3219 		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
3220 										   engine_clock,
3221 										   CISLAND_MINIMUM_ENGINE_CLOCK);
3222 
3223 	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3224 
3225 	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
3226 	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
3227 	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
3228 	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
3229 	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
3230 	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
3231 	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
3232 	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
3233 	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
3234 	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
3235 	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
3236 
3237 	return 0;
3238 }
3239 
3240 static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
3241 {
3242 	struct ci_power_info *pi = ci_get_pi(rdev);
3243 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
3244 	u32 level_array_address = pi->dpm_table_start +
3245 		offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3246 	u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3247 		SMU7_MAX_LEVELS_GRAPHICS;
3248 	SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3249 	u32 i, ret;
3250 
3251 	memset(levels, 0, level_array_size);
3252 
3253 	for (i = 0; i < dpm_table->sclk_table.count; i++) {
3254 		ret = ci_populate_single_graphic_level(rdev,
3255 						       dpm_table->sclk_table.dpm_levels[i].value,
3256 						       (u16)pi->activity_target[i],
3257 						       &pi->smc_state_table.GraphicsLevel[i]);
3258 		if (ret)
3259 			return ret;
3260 		if (i > 1)
3261 			pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3262 		if (i == (dpm_table->sclk_table.count - 1))
3263 			pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3264 				PPSMC_DISPLAY_WATERMARK_HIGH;
3265 	}
3266 	pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
3267 
3268 	pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3269 	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3270 		ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3271 
3272 	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
3273 				   (u8 *)levels, level_array_size,
3274 				   pi->sram_end);
3275 	if (ret)
3276 		return ret;
3277 
3278 	return 0;
3279 }
3280 
3281 static int ci_populate_ulv_state(struct radeon_device *rdev,
3282 				 SMU7_Discrete_Ulv *ulv_level)
3283 {
3284 	return ci_populate_ulv_level(rdev, ulv_level);
3285 }
3286 
3287 static int ci_populate_all_memory_levels(struct radeon_device *rdev)
3288 {
3289 	struct ci_power_info *pi = ci_get_pi(rdev);
3290 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
3291 	u32 level_array_address = pi->dpm_table_start +
3292 		offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3293 	u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3294 		SMU7_MAX_LEVELS_MEMORY;
3295 	SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3296 	u32 i, ret;
3297 
3298 	memset(levels, 0, level_array_size);
3299 
3300 	for (i = 0; i < dpm_table->mclk_table.count; i++) {
3301 		if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3302 			return -EINVAL;
3303 		ret = ci_populate_single_memory_level(rdev,
3304 						      dpm_table->mclk_table.dpm_levels[i].value,
3305 						      &pi->smc_state_table.MemoryLevel[i]);
3306 		if (ret)
3307 			return ret;
3308 	}
3309 
3310 	pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
3311 
3312 	if ((dpm_table->mclk_table.count >= 2) &&
3313 	    ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) {
3314 		pi->smc_state_table.MemoryLevel[1].MinVddc =
3315 			pi->smc_state_table.MemoryLevel[0].MinVddc;
3316 		pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3317 			pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3318 	}
3319 
3320 	pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3321 
3322 	pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3323 	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3324 		ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3325 
3326 	pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3327 		PPSMC_DISPLAY_WATERMARK_HIGH;
3328 
3329 	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
3330 				   (u8 *)levels, level_array_size,
3331 				   pi->sram_end);
3332 	if (ret)
3333 		return ret;
3334 
3335 	return 0;
3336 }
3337 
3338 static void ci_reset_single_dpm_table(struct radeon_device *rdev,
3339 				      struct ci_single_dpm_table *dpm_table,
3340 				      u32 count)
3341 {
3342 	u32 i;
3343 
3344 	dpm_table->count = count;
3345 	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3346 		dpm_table->dpm_levels[i].enabled = false;
3347 }
3348 
3349 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
3350 				      u32 index, u32 pcie_gen, u32 pcie_lanes)
3351 {
3352 	dpm_table->dpm_levels[index].value = pcie_gen;
3353 	dpm_table->dpm_levels[index].param1 = pcie_lanes;
3354 	dpm_table->dpm_levels[index].enabled = true;
3355 }
3356 
3357 static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
3358 {
3359 	struct ci_power_info *pi = ci_get_pi(rdev);
3360 
3361 	if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3362 		return -EINVAL;
3363 
3364 	if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3365 		pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3366 		pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3367 	} else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3368 		pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3369 		pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3370 	}
3371 
3372 	ci_reset_single_dpm_table(rdev,
3373 				  &pi->dpm_table.pcie_speed_table,
3374 				  SMU7_MAX_LEVELS_LINK);
3375 
3376 	if (rdev->family == CHIP_BONAIRE)
3377 		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3378 					  pi->pcie_gen_powersaving.min,
3379 					  pi->pcie_lane_powersaving.max);
3380 	else
3381 		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3382 					  pi->pcie_gen_powersaving.min,
3383 					  pi->pcie_lane_powersaving.min);
3384 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3385 				  pi->pcie_gen_performance.min,
3386 				  pi->pcie_lane_performance.min);
3387 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3388 				  pi->pcie_gen_powersaving.min,
3389 				  pi->pcie_lane_powersaving.max);
3390 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3391 				  pi->pcie_gen_performance.min,
3392 				  pi->pcie_lane_performance.max);
3393 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3394 				  pi->pcie_gen_powersaving.max,
3395 				  pi->pcie_lane_powersaving.max);
3396 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3397 				  pi->pcie_gen_performance.max,
3398 				  pi->pcie_lane_performance.max);
3399 
3400 	pi->dpm_table.pcie_speed_table.count = 6;
3401 
3402 	return 0;
3403 }
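
/*
 * For reference, the default PCIe table built above is, by index:
 *   0: powersaving min gen / min lanes (max lanes on Bonaire)
 *   1: performance min gen / min lanes
 *   2: powersaving min gen / max lanes
 *   3: performance min gen / max lanes
 *   4: powersaving max gen / max lanes
 *   5: performance max gen / max lanes
 */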
3404 
3405 static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
3406 {
3407 	struct ci_power_info *pi = ci_get_pi(rdev);
3408 	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3409 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3410 	struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
3411 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3412 	struct radeon_cac_leakage_table *std_voltage_table =
3413 		&rdev->pm.dpm.dyn_state.cac_leakage_table;
3414 	u32 i;
3415 
3416 	if (allowed_sclk_vddc_table == NULL)
3417 		return -EINVAL;
3418 	if (allowed_sclk_vddc_table->count < 1)
3419 		return -EINVAL;
3420 	if (allowed_mclk_table == NULL)
3421 		return -EINVAL;
3422 	if (allowed_mclk_table->count < 1)
3423 		return -EINVAL;
3424 
3425 	memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3426 
3427 	ci_reset_single_dpm_table(rdev,
3428 				  &pi->dpm_table.sclk_table,
3429 				  SMU7_MAX_LEVELS_GRAPHICS);
3430 	ci_reset_single_dpm_table(rdev,
3431 				  &pi->dpm_table.mclk_table,
3432 				  SMU7_MAX_LEVELS_MEMORY);
3433 	ci_reset_single_dpm_table(rdev,
3434 				  &pi->dpm_table.vddc_table,
3435 				  SMU7_MAX_LEVELS_VDDC);
3436 	ci_reset_single_dpm_table(rdev,
3437 				  &pi->dpm_table.vddci_table,
3438 				  SMU7_MAX_LEVELS_VDDCI);
3439 	ci_reset_single_dpm_table(rdev,
3440 				  &pi->dpm_table.mvdd_table,
3441 				  SMU7_MAX_LEVELS_MVDD);
3442 
3443 	pi->dpm_table.sclk_table.count = 0;
3444 	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3445 		if ((i == 0) ||
3446 		    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3447 		     allowed_sclk_vddc_table->entries[i].clk)) {
3448 			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3449 				allowed_sclk_vddc_table->entries[i].clk;
3450 			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3451 				(i == 0);
3452 			pi->dpm_table.sclk_table.count++;
3453 		}
3454 	}
3455 
3456 	pi->dpm_table.mclk_table.count = 0;
3457 	for (i = 0; i < allowed_mclk_table->count; i++) {
3458 		if ((i == 0) ||
3459 		    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3460 		     allowed_mclk_table->entries[i].clk)) {
3461 			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3462 				allowed_mclk_table->entries[i].clk;
3463 			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3464 				(i == 0);
3465 			pi->dpm_table.mclk_table.count++;
3466 		}
3467 	}
3468 
3469 	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3470 		pi->dpm_table.vddc_table.dpm_levels[i].value =
3471 			allowed_sclk_vddc_table->entries[i].v;
3472 		pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3473 			std_voltage_table->entries[i].leakage;
3474 		pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3475 	}
3476 	pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3477 
3478 	allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3479 	if (allowed_mclk_table) {
3480 		for (i = 0; i < allowed_mclk_table->count; i++) {
3481 			pi->dpm_table.vddci_table.dpm_levels[i].value =
3482 				allowed_mclk_table->entries[i].v;
3483 			pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3484 		}
3485 		pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3486 	}
3487 
3488 	allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3489 	if (allowed_mclk_table) {
3490 		for (i = 0; i < allowed_mclk_table->count; i++) {
3491 			pi->dpm_table.mvdd_table.dpm_levels[i].value =
3492 				allowed_mclk_table->entries[i].v;
3493 			pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3494 		}
3495 		pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3496 	}
3497 
3498 	ci_setup_default_pcie_tables(rdev);
3499 
3500 	return 0;
3501 }
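
/*
 * Note on the sclk/mclk loops above: the dependency tables are assumed to
 * be sorted, so comparing each entry only against the last accepted value
 * is enough to drop runs of duplicate clocks.
 */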
3502 
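/*
 * Scan the whole table and report the *last* level matching @value; on a
 * miss, -EINVAL is returned and *boot_level is left untouched (callers in
 * this file rely on it then staying 0).
 */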
3503 static int ci_find_boot_level(struct ci_single_dpm_table *table,
3504 			      u32 value, u32 *boot_level)
3505 {
3506 	u32 i;
3507 	int ret = -EINVAL;
3508 
3509 	for (i = 0; i < table->count; i++) {
3510 		if (value == table->dpm_levels[i].value) {
3511 			*boot_level = i;
3512 			ret = 0;
3513 		}
3514 	}
3515 
3516 	return ret;
3517 }
3518 
3519 static int ci_init_smc_table(struct radeon_device *rdev)
3520 {
3521 	struct ci_power_info *pi = ci_get_pi(rdev);
3522 	struct ci_ulv_parm *ulv = &pi->ulv;
3523 	struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
3524 	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3525 	int ret;
3526 
3527 	ret = ci_setup_default_dpm_tables(rdev);
3528 	if (ret)
3529 		return ret;
3530 
3531 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3532 		ci_populate_smc_voltage_tables(rdev, table);
3533 
3534 	ci_init_fps_limits(rdev);
3535 
3536 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3537 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3538 
3539 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3540 		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3541 
3542 	if (pi->mem_gddr5)
3543 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3544 
3545 	if (ulv->supported) {
3546 		ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
3547 		if (ret)
3548 			return ret;
3549 		WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3550 	}
3551 
3552 	ret = ci_populate_all_graphic_levels(rdev);
3553 	if (ret)
3554 		return ret;
3555 
3556 	ret = ci_populate_all_memory_levels(rdev);
3557 	if (ret)
3558 		return ret;
3559 
3560 	ci_populate_smc_link_level(rdev, table);
3561 
3562 	ret = ci_populate_smc_acpi_level(rdev, table);
3563 	if (ret)
3564 		return ret;
3565 
3566 	ret = ci_populate_smc_vce_level(rdev, table);
3567 	if (ret)
3568 		return ret;
3569 
3570 	ret = ci_populate_smc_acp_level(rdev, table);
3571 	if (ret)
3572 		return ret;
3573 
3574 	ret = ci_populate_smc_samu_level(rdev, table);
3575 	if (ret)
3576 		return ret;
3577 
3578 	ret = ci_do_program_memory_timing_parameters(rdev);
3579 	if (ret)
3580 		return ret;
3581 
3582 	ret = ci_populate_smc_uvd_level(rdev, table);
3583 	if (ret)
3584 		return ret;
3585 
3586 	table->UvdBootLevel = 0;
3587 	table->VceBootLevel = 0;
3588 	table->AcpBootLevel = 0;
3589 	table->SamuBootLevel = 0;
3590 	table->GraphicsBootLevel = 0;
3591 	table->MemoryBootLevel = 0;
3592 
	/* NB: a failed boot level lookup is not treated as fatal here; the
	 * corresponding boot level then simply stays 0 (the lowest state).
	 */
3593 	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3594 				 pi->vbios_boot_state.sclk_bootup_value,
3595 				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3596 
3597 	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3598 				 pi->vbios_boot_state.mclk_bootup_value,
3599 				 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3600 
3601 	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3602 	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3603 	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3604 
3605 	ci_populate_smc_initial_state(rdev, radeon_boot_state);
3606 
3607 	ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
3608 	if (ret)
3609 		return ret;
3610 
3611 	table->UVDInterval = 1;
3612 	table->VCEInterval = 1;
3613 	table->ACPInterval = 1;
3614 	table->SAMUInterval = 1;
3615 	table->GraphicsVoltageChangeEnable = 1;
3616 	table->GraphicsThermThrottleEnable = 1;
3617 	table->GraphicsInterval = 1;
3618 	table->VoltageInterval = 1;
3619 	table->ThermalInterval = 1;
3620 	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3621 					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3622 	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3623 					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3624 	table->MemoryVoltageChangeEnable = 1;
3625 	table->MemoryInterval = 1;
3626 	table->VoltageResponseTime = 0;
3627 	table->VddcVddciDelta = 4000;
3628 	table->PhaseResponseTime = 0;
3629 	table->MemoryThermThrottleEnable = 1;
3630 	table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3631 	table->PCIeGenInterval = 1;
3632 	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3633 		table->SVI2Enable = 1;
3634 	else
3635 		table->SVI2Enable = 0;
3636 
3637 	table->ThermGpio = 17;
3638 	table->SclkStepSize = 0x4000;
3639 
3640 	table->SystemFlags = cpu_to_be32(table->SystemFlags);
3641 	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3642 	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3643 	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3644 	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3645 	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3646 	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3647 	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3648 	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3649 	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3650 	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3651 	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3652 	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3653 	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3654 
3655 	ret = ci_copy_bytes_to_smc(rdev,
3656 				   pi->dpm_table_start +
3657 				   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3658 				   (u8 *)&table->SystemFlags,
3659 				   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3660 				   pi->sram_end);
3661 	if (ret)
3662 		return ret;
3663 
3664 	return 0;
3665 }
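
/*
 * Worked example for the thermal-limit conversion above, assuming
 * CISLANDS_Q88_FORMAT_CONVERSION_UNIT == 256: a 90000 millidegree limit
 * becomes (90000 * 256) / 1000 == 23040 == 90 << 8, i.e. 90.0 degrees in
 * the SMC's Q8.8 fixed-point format.  Note also that the final upload
 * stops 3 * sizeof(SMU7_PIDController) bytes short of the full structure,
 * so the trailing PID controller fields are not overwritten.
 */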
3666 
3667 static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3668 				      struct ci_single_dpm_table *dpm_table,
3669 				      u32 low_limit, u32 high_limit)
3670 {
3671 	u32 i;
3672 
3673 	for (i = 0; i < dpm_table->count; i++) {
3674 		if ((dpm_table->dpm_levels[i].value < low_limit) ||
3675 		    (dpm_table->dpm_levels[i].value > high_limit))
3676 			dpm_table->dpm_levels[i].enabled = false;
3677 		else
3678 			dpm_table->dpm_levels[i].enabled = true;
3679 	}
3680 }
3681 
3682 static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3683 				    u32 speed_low, u32 lanes_low,
3684 				    u32 speed_high, u32 lanes_high)
3685 {
3686 	struct ci_power_info *pi = ci_get_pi(rdev);
3687 	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3688 	u32 i, j;
3689 
3690 	for (i = 0; i < pcie_table->count; i++) {
3691 		if ((pcie_table->dpm_levels[i].value < speed_low) ||
3692 		    (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3693 		    (pcie_table->dpm_levels[i].value > speed_high) ||
3694 		    (pcie_table->dpm_levels[i].param1 > lanes_high))
3695 			pcie_table->dpm_levels[i].enabled = false;
3696 		else
3697 			pcie_table->dpm_levels[i].enabled = true;
3698 	}
3699 
3700 	for (i = 0; i < pcie_table->count; i++) {
3701 		if (pcie_table->dpm_levels[i].enabled) {
3702 			for (j = i + 1; j < pcie_table->count; j++) {
3703 				if (pcie_table->dpm_levels[j].enabled) {
3704 					if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3705 					    (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3706 						pcie_table->dpm_levels[j].enabled = false;
3707 				}
3708 			}
3709 		}
3710 	}
3711 }
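
/*
 * The second pass above is a simple O(n^2) de-duplication: among enabled
 * entries with identical (gen, lane) pairs, only the lowest-indexed one
 * stays enabled.
 */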
3712 
3713 static int ci_trim_dpm_states(struct radeon_device *rdev,
3714 			      struct radeon_ps *radeon_state)
3715 {
3716 	struct ci_ps *state = ci_get_ps(radeon_state);
3717 	struct ci_power_info *pi = ci_get_pi(rdev);
3718 	u32 high_limit_count;
3719 
3720 	if (state->performance_level_count < 1)
3721 		return -EINVAL;
3722 
3723 	if (state->performance_level_count == 1)
3724 		high_limit_count = 0;
3725 	else
3726 		high_limit_count = 1;
3727 
3728 	ci_trim_single_dpm_states(rdev,
3729 				  &pi->dpm_table.sclk_table,
3730 				  state->performance_levels[0].sclk,
3731 				  state->performance_levels[high_limit_count].sclk);
3732 
3733 	ci_trim_single_dpm_states(rdev,
3734 				  &pi->dpm_table.mclk_table,
3735 				  state->performance_levels[0].mclk,
3736 				  state->performance_levels[high_limit_count].mclk);
3737 
3738 	ci_trim_pcie_dpm_states(rdev,
3739 				state->performance_levels[0].pcie_gen,
3740 				state->performance_levels[0].pcie_lane,
3741 				state->performance_levels[high_limit_count].pcie_gen,
3742 				state->performance_levels[high_limit_count].pcie_lane);
3743 
3744 	return 0;
3745 }
3746 
3747 static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
3748 {
3749 	struct radeon_clock_voltage_dependency_table *disp_voltage_table =
3750 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3751 	struct radeon_clock_voltage_dependency_table *vddc_table =
3752 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3753 	u32 requested_voltage = 0;
3754 	u32 i;
3755 
3756 	if (disp_voltage_table == NULL)
3757 		return -EINVAL;
3758 	if (!disp_voltage_table->count)
3759 		return -EINVAL;
3760 
3761 	for (i = 0; i < disp_voltage_table->count; i++) {
3762 		if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3763 			requested_voltage = disp_voltage_table->entries[i].v;
3764 	}
3765 
3766 	for (i = 0; i < vddc_table->count; i++) {
3767 		if (requested_voltage <= vddc_table->entries[i].v) {
3768 			requested_voltage = vddc_table->entries[i].v;
3769 			return (ci_send_msg_to_smc_with_parameter(rdev,
3770 								  PPSMC_MSG_VddC_Request,
3771 								  requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3772 				0 : -EINVAL;
3773 		}
3774 	}
3775 
3776 	return -EINVAL;
3777 }
3778 
3779 static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3780 {
3781 	struct ci_power_info *pi = ci_get_pi(rdev);
3782 	PPSMC_Result result;
3783 
3784 	ci_apply_disp_minimum_voltage_request(rdev);
3785 
3786 	if (!pi->sclk_dpm_key_disabled) {
3787 		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3788 			result = ci_send_msg_to_smc_with_parameter(rdev,
3789 								   PPSMC_MSG_SCLKDPM_SetEnabledMask,
3790 								   pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3791 			if (result != PPSMC_Result_OK)
3792 				return -EINVAL;
3793 		}
3794 	}
3795 
3796 	if (!pi->mclk_dpm_key_disabled) {
3797 		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3798 			result = ci_send_msg_to_smc_with_parameter(rdev,
3799 								   PPSMC_MSG_MCLKDPM_SetEnabledMask,
3800 								   pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3801 			if (result != PPSMC_Result_OK)
3802 				return -EINVAL;
3803 		}
3804 	}
3805 #if 0
3806 	if (!pi->pcie_dpm_key_disabled) {
3807 		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3808 			result = ci_send_msg_to_smc_with_parameter(rdev,
3809 								   PPSMC_MSG_PCIeDPM_SetEnabledMask,
3810 								   pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3811 			if (result != PPSMC_Result_OK)
3812 				return -EINVAL;
3813 		}
3814 	}
3815 #endif
3816 	return 0;
3817 }
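
/*
 * The enable masks sent above are plain bitmaps: bit i set means DPM level
 * i may be used.  For example, a four-level sclk table with level 0 parked
 * would be sent as 0xE (levels 1-3 enabled).
 */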
3818 
3819 static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
3820 						   struct radeon_ps *radeon_state)
3821 {
3822 	struct ci_power_info *pi = ci_get_pi(rdev);
3823 	struct ci_ps *state = ci_get_ps(radeon_state);
3824 	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3825 	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3826 	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3827 	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3828 	u32 i;
3829 
3830 	pi->need_update_smu7_dpm_table = 0;
3831 
3832 	for (i = 0; i < sclk_table->count; i++) {
3833 		if (sclk == sclk_table->dpm_levels[i].value)
3834 			break;
3835 	}
3836 
3837 	if (i >= sclk_table->count) {
3838 		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3839 	} else {
3840 		/* XXX The current code always reprogrammed the sclk levels,
3841 		 * but disp sclk requirements are not handled yet, so skip the
3842 		 * update: the self-comparison below is always false on purpose.
3843 		 */
3844 		if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
3845 			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3846 	}
3847 
3848 	for (i = 0; i < mclk_table->count; i++) {
3849 		if (mclk == mclk_table->dpm_levels[i].value)
3850 			break;
3851 	}
3852 
3853 	if (i >= mclk_table->count)
3854 		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3855 
3856 	if (rdev->pm.dpm.current_active_crtc_count !=
3857 	    rdev->pm.dpm.new_active_crtc_count)
3858 		pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3859 }
3860 
3861 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
3862 						       struct radeon_ps *radeon_state)
3863 {
3864 	struct ci_power_info *pi = ci_get_pi(rdev);
3865 	struct ci_ps *state = ci_get_ps(radeon_state);
3866 	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3867 	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3868 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
3869 	int ret;
3870 
3871 	if (!pi->need_update_smu7_dpm_table)
3872 		return 0;
3873 
3874 	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
3875 		dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
3876 
3877 	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
3878 		dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
3879 
3880 	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
3881 		ret = ci_populate_all_graphic_levels(rdev);
3882 		if (ret)
3883 			return ret;
3884 	}
3885 
3886 	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3887 		ret = ci_populate_all_memory_levels(rdev);
3888 		if (ret)
3889 			return ret;
3890 	}
3891 
3892 	return 0;
3893 }
3894 
3895 static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
3896 {
3897 	struct ci_power_info *pi = ci_get_pi(rdev);
3898 	const struct radeon_clock_and_voltage_limits *max_limits;
3899 	int i;
3900 
3901 	if (rdev->pm.dpm.ac_power)
3902 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3903 	else
3904 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3905 
3906 	if (enable) {
3907 		pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
3908 
3909 		for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3910 			if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3911 				pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
3912 
3913 				if (!pi->caps_uvd_dpm)
3914 					break;
3915 			}
3916 		}
3917 
3918 		ci_send_msg_to_smc_with_parameter(rdev,
3919 						  PPSMC_MSG_UVDDPM_SetEnabledMask,
3920 						  pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
3921 
3922 		if (pi->last_mclk_dpm_enable_mask & 0x1) {
3923 			pi->uvd_enabled = true;
3924 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3925 			ci_send_msg_to_smc_with_parameter(rdev,
3926 							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
3927 							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3928 		}
3929 	} else {
3930 		if (pi->last_mclk_dpm_enable_mask & 0x1) {
3931 			pi->uvd_enabled = false;
3932 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
3933 			ci_send_msg_to_smc_with_parameter(rdev,
3934 							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
3935 							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3936 		}
3937 	}
3938 
3939 	return (ci_send_msg_to_smc(rdev, enable ?
3940 				   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
3941 		0 : -EINVAL;
3942 }
3943 
3944 static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
3945 {
3946 	struct ci_power_info *pi = ci_get_pi(rdev);
3947 	const struct radeon_clock_and_voltage_limits *max_limits;
3948 	int i;
3949 
3950 	if (rdev->pm.dpm.ac_power)
3951 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3952 	else
3953 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3954 
3955 	if (enable) {
3956 		pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
3957 		for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3958 			if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3959 				pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
3960 
3961 				if (!pi->caps_vce_dpm)
3962 					break;
3963 			}
3964 		}
3965 
3966 		ci_send_msg_to_smc_with_parameter(rdev,
3967 						  PPSMC_MSG_VCEDPM_SetEnabledMask,
3968 						  pi->dpm_level_enable_mask.vce_dpm_enable_mask);
3969 	}
3970 
3971 	return (ci_send_msg_to_smc(rdev, enable ?
3972 				   PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
3973 		0 : -EINVAL;
3974 }
3975 
3976 #if 0
3977 static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
3978 {
3979 	struct ci_power_info *pi = ci_get_pi(rdev);
3980 	const struct radeon_clock_and_voltage_limits *max_limits;
3981 	int i;
3982 
3983 	if (rdev->pm.dpm.ac_power)
3984 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3985 	else
3986 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3987 
3988 	if (enable) {
3989 		pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
3990 		for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3991 			if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3992 				pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
3993 
3994 				if (!pi->caps_samu_dpm)
3995 					break;
3996 			}
3997 		}
3998 
3999 		ci_send_msg_to_smc_with_parameter(rdev,
4000 						  PPSMC_MSG_SAMUDPM_SetEnabledMask,
4001 						  pi->dpm_level_enable_mask.samu_dpm_enable_mask);
4002 	}
4003 	return (ci_send_msg_to_smc(rdev, enable ?
4004 				   PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
4005 		0 : -EINVAL;
4006 }
4007 
4008 static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
4009 {
4010 	struct ci_power_info *pi = ci_get_pi(rdev);
4011 	const struct radeon_clock_and_voltage_limits *max_limits;
4012 	int i;
4013 
4014 	if (rdev->pm.dpm.ac_power)
4015 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4016 	else
4017 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4018 
4019 	if (enable) {
4020 		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
4021 		for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4022 			if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4023 				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
4024 
4025 				if (!pi->caps_acp_dpm)
4026 					break;
4027 			}
4028 		}
4029 
4030 		ci_send_msg_to_smc_with_parameter(rdev,
4031 						  PPSMC_MSG_ACPDPM_SetEnabledMask,
4032 						  pi->dpm_level_enable_mask.acp_dpm_enable_mask);
4033 	}
4034 
4035 	return (ci_send_msg_to_smc(rdev, enable ?
4036 				   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
4037 		0 : -EINVAL;
4038 }
4039 #endif
4040 
4041 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
4042 {
4043 	struct ci_power_info *pi = ci_get_pi(rdev);
4044 	u32 tmp;
4045 
4046 	if (!gate) {
4047 		if (pi->caps_uvd_dpm ||
4048 		    (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4049 			pi->smc_state_table.UvdBootLevel = 0;
4050 		else
4051 			pi->smc_state_table.UvdBootLevel =
4052 				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4053 
4054 		tmp = RREG32_SMC(DPM_TABLE_475);
4055 		tmp &= ~UvdBootLevel_MASK;
4056 		tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
4057 		WREG32_SMC(DPM_TABLE_475, tmp);
4058 	}
4059 
4060 	return ci_enable_uvd_dpm(rdev, !gate);
4061 }
4062 
4063 static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
4064 {
4065 	u8 i;
4066 	u32 min_evclk = 30000; /* ??? */
4067 	struct radeon_vce_clock_voltage_dependency_table *table =
4068 		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4069 
4070 	for (i = 0; i < table->count; i++) {
4071 		if (table->entries[i].evclk >= min_evclk)
4072 			return i;
4073 	}
4074 
4075 	return table->count - 1;
4076 }
4077 
4078 static int ci_update_vce_dpm(struct radeon_device *rdev,
4079 			     struct radeon_ps *radeon_new_state,
4080 			     struct radeon_ps *radeon_current_state)
4081 {
4082 	struct ci_power_info *pi = ci_get_pi(rdev);
4083 	int ret = 0;
4084 	u32 tmp;
4085 
4086 	if (radeon_current_state->evclk != radeon_new_state->evclk) {
4087 		if (radeon_new_state->evclk) {
4088 			/* turn the clocks on when encoding */
4089 			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
4090 
4091 			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
4092 			tmp = RREG32_SMC(DPM_TABLE_475);
4093 			tmp &= ~VceBootLevel_MASK;
4094 			tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
4095 			WREG32_SMC(DPM_TABLE_475, tmp);
4096 
4097 			ret = ci_enable_vce_dpm(rdev, true);
4098 		} else {
4099 			/* turn the clocks off when not encoding */
4100 			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
4101 
4102 			ret = ci_enable_vce_dpm(rdev, false);
4103 		}
4104 	}
4105 	return ret;
4106 }
4107 
4108 #if 0
4109 static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
4110 {
4111 	return ci_enable_samu_dpm(rdev, gate);
4112 }
4113 
4114 static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
4115 {
4116 	struct ci_power_info *pi = ci_get_pi(rdev);
4117 	u32 tmp;
4118 
4119 	if (!gate) {
4120 		pi->smc_state_table.AcpBootLevel = 0;
4121 
4122 		tmp = RREG32_SMC(DPM_TABLE_475);
4123 		tmp &= ~AcpBootLevel_MASK;
4124 		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
4125 		WREG32_SMC(DPM_TABLE_475, tmp);
4126 	}
4127 
4128 	return ci_enable_acp_dpm(rdev, !gate);
4129 }
4130 #endif
4131 
4132 static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
4133 					     struct radeon_ps *radeon_state)
4134 {
4135 	struct ci_power_info *pi = ci_get_pi(rdev);
4136 	int ret;
4137 
4138 	ret = ci_trim_dpm_states(rdev, radeon_state);
4139 	if (ret)
4140 		return ret;
4141 
4142 	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4143 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4144 	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4145 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4146 	pi->last_mclk_dpm_enable_mask =
4147 		pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4148 	if (pi->uvd_enabled) {
4149 		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4150 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4151 	}
4152 	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4153 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4154 
4155 	return 0;
4156 }
4157 
4158 static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
4159 				       u32 level_mask)
4160 {
4161 	u32 level = 0;
4162 
4163 	while ((level_mask & (1 << level)) == 0)
4164 		level++;
4165 
4166 	return level;
4167 }
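
/*
 * Note that the loop above never terminates for level_mask == 0; callers
 * only pass masks they have already checked to be non-zero.  For any
 * non-zero mask it is equivalent to this sketch:
 */
#if 0
static u32 ci_get_lowest_enabled_level_alt(u32 level_mask)
{
	return ffs(level_mask) - 1;	/* index of the lowest set bit */
}
#endif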
4168 
4170 int ci_dpm_force_performance_level(struct radeon_device *rdev,
4171 				   enum radeon_dpm_forced_level level)
4172 {
4173 	struct ci_power_info *pi = ci_get_pi(rdev);
4174 	u32 tmp, levels, i;
4175 	int ret;
4176 
4177 	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
4178 		if ((!pi->pcie_dpm_key_disabled) &&
4179 		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4180 			levels = 0;
4181 			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4182 			while (tmp >>= 1)
4183 				levels++;
4184 			if (levels) {
4185 				ret = ci_dpm_force_state_pcie(rdev, levels);
4186 				if (ret)
4187 					return ret;
4188 				for (i = 0; i < rdev->usec_timeout; i++) {
4189 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
4190 					       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
4191 					if (tmp == levels)
4192 						break;
4193 					udelay(1);
4194 				}
4195 			}
4196 		}
4197 		if ((!pi->sclk_dpm_key_disabled) &&
4198 		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4199 			levels = 0;
4200 			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4201 			while (tmp >>= 1)
4202 				levels++;
4203 			if (levels) {
4204 				ret = ci_dpm_force_state_sclk(rdev, levels);
4205 				if (ret)
4206 					return ret;
4207 				for (i = 0; i < rdev->usec_timeout; i++) {
4208 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4209 					       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
4210 					if (tmp == levels)
4211 						break;
4212 					udelay(1);
4213 				}
4214 			}
4215 		}
4216 		if ((!pi->mclk_dpm_key_disabled) &&
4217 		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4218 			levels = 0;
4219 			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4220 			while (tmp >>= 1)
4221 				levels++;
4222 			if (levels) {
4223 				ret = ci_dpm_force_state_mclk(rdev, levels);
4224 				if (ret)
4225 					return ret;
4226 				for (i = 0; i < rdev->usec_timeout; i++) {
4227 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4228 					       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
4229 					if (tmp == levels)
4230 						break;
4231 					udelay(1);
4232 				}
4233 			}
4234 		}
4235 	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
4236 		if ((!pi->sclk_dpm_key_disabled) &&
4237 		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4238 			levels = ci_get_lowest_enabled_level(rdev,
4239 							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4240 			ret = ci_dpm_force_state_sclk(rdev, levels);
4241 			if (ret)
4242 				return ret;
4243 			for (i = 0; i < rdev->usec_timeout; i++) {
4244 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4245 				       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
4246 				if (tmp == levels)
4247 					break;
4248 				udelay(1);
4249 			}
4250 		}
4251 		if ((!pi->mclk_dpm_key_disabled) &&
4252 		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4253 			levels = ci_get_lowest_enabled_level(rdev,
4254 							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4255 			ret = ci_dpm_force_state_mclk(rdev, levels);
4256 			if (ret)
4257 				return ret;
4258 			for (i = 0; i < rdev->usec_timeout; i++) {
4259 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4260 				       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
4261 				if (tmp == levels)
4262 					break;
4263 				udelay(1);
4264 			}
4265 		}
4266 		if ((!pi->pcie_dpm_key_disabled) &&
4267 		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4268 			levels = ci_get_lowest_enabled_level(rdev,
4269 							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4270 			ret = ci_dpm_force_state_pcie(rdev, levels);
4271 			if (ret)
4272 				return ret;
4273 			for (i = 0; i < rdev->usec_timeout; i++) {
4274 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
4275 				       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
4276 				if (tmp == levels)
4277 					break;
4278 				udelay(1);
4279 			}
4280 		}
4281 	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
4282 		if (!pi->pcie_dpm_key_disabled) {
4283 			PPSMC_Result smc_result;
4284 
4285 			smc_result = ci_send_msg_to_smc(rdev,
4286 							PPSMC_MSG_PCIeDPM_UnForceLevel);
4287 			if (smc_result != PPSMC_Result_OK)
4288 				return -EINVAL;
4289 		}
4290 		ret = ci_upload_dpm_level_enable_mask(rdev);
4291 		if (ret)
4292 			return ret;
4293 	}
4294 
4295 	rdev->pm.dpm.forced_level = level;
4296 
4297 	return 0;
4298 }
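
/*
 * In the HIGH branch above, "levels" ends up as the index of the highest
 * set bit of the enable mask, so the SMC is asked to pin the highest
 * enabled state.  An equivalent sketch for a non-zero mask:
 */
#if 0
static u32 ci_get_highest_enabled_level(u32 mask)
{
	return fls(mask) - 1;	/* same as the while (tmp >>= 1) loop */
}
#endif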
4299 
4300 static int ci_set_mc_special_registers(struct radeon_device *rdev,
4301 				       struct ci_mc_reg_table *table)
4302 {
4303 	struct ci_power_info *pi = ci_get_pi(rdev);
4304 	u8 i, j, k;
4305 	u32 temp_reg;
4306 
4307 	for (i = 0, j = table->last; i < table->last; i++) {
4308 		if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4309 			return -EINVAL;
4310 		switch (table->mc_reg_address[i].s1 << 2) {
4311 		case MC_SEQ_MISC1:
4312 			temp_reg = RREG32(MC_PMG_CMD_EMRS);
4313 			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
4314 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
4315 			for (k = 0; k < table->num_entries; k++) {
4316 				table->mc_reg_table_entry[k].mc_data[j] =
4317 					((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4318 			}
4319 			j++;
4320 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4321 				return -EINVAL;
4322 
4323 			temp_reg = RREG32(MC_PMG_CMD_MRS);
4324 			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
4325 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
4326 			for (k = 0; k < table->num_entries; k++) {
4327 				table->mc_reg_table_entry[k].mc_data[j] =
4328 					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4329 				if (!pi->mem_gddr5)
4330 					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4331 			}
4332 			j++;
4333 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4334 				return -EINVAL;
4335 
4336 			if (!pi->mem_gddr5) {
4337 				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
4338 				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
4339 				for (k = 0; k < table->num_entries; k++) {
4340 					table->mc_reg_table_entry[k].mc_data[j] =
4341 						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4342 				}
4343 				j++;
4344 				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4345 					return -EINVAL;
4346 			}
4347 			break;
4348 		case MC_SEQ_RESERVE_M:
4349 			temp_reg = RREG32(MC_PMG_CMD_MRS1);
4350 			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
4351 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
4352 			for (k = 0; k < table->num_entries; k++) {
4353 				table->mc_reg_table_entry[k].mc_data[j] =
4354 					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4355 			}
4356 			j++;
4357 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4358 				return -EINVAL;
4359 			break;
4360 		default:
4361 			break;
4362 		}
4363 
4364 	}
4365 
4366 	table->last = j;
4367 
4368 	return 0;
4369 }
4370 
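/*
 * Map an MC sequencer register (s1, stored as a dword address) to its _LP
 * shadow used for the low-power state; returns false when the register has
 * no shadow, in which case the caller falls back to s1 itself.
 */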
4371 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4372 {
4373 	bool result = true;
4374 
4375 	switch (in_reg) {
4376 	case MC_SEQ_RAS_TIMING >> 2:
4377 		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
4378 		break;
4379 	case MC_SEQ_DLL_STBY >> 2:
4380 		*out_reg = MC_SEQ_DLL_STBY_LP >> 2;
4381 		break;
4382 	case MC_SEQ_G5PDX_CMD0 >> 2:
4383 		*out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
4384 		break;
4385 	case MC_SEQ_G5PDX_CMD1 >> 2:
4386 		*out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
4387 		break;
4388 	case MC_SEQ_G5PDX_CTRL >> 2:
4389 		*out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
4390 		break;
4391 	case MC_SEQ_CAS_TIMING >> 2:
4392 		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
4393 		break;
4394 	case MC_SEQ_MISC_TIMING >> 2:
4395 		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
4396 		break;
4397 	case MC_SEQ_MISC_TIMING2 >> 2:
4398 		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
4399 		break;
4400 	case MC_SEQ_PMG_DVS_CMD >> 2:
4401 		*out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
4402 		break;
4403 	case MC_SEQ_PMG_DVS_CTL >> 2:
4404 		*out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
4405 		break;
4406 	case MC_SEQ_RD_CTL_D0 >> 2:
4407 		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
4408 		break;
4409 	case MC_SEQ_RD_CTL_D1 >> 2:
4410 		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
4411 		break;
4412 	case MC_SEQ_WR_CTL_D0 >> 2:
4413 		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
4414 		break;
4415 	case MC_SEQ_WR_CTL_D1 >> 2:
4416 		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
4417 		break;
4418 	case MC_PMG_CMD_EMRS >> 2:
4419 		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
4420 		break;
4421 	case MC_PMG_CMD_MRS >> 2:
4422 		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
4423 		break;
4424 	case MC_PMG_CMD_MRS1 >> 2:
4425 		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
4426 		break;
4427 	case MC_SEQ_PMG_TIMING >> 2:
4428 		*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
4429 		break;
4430 	case MC_PMG_CMD_MRS2 >> 2:
4431 		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
4432 		break;
4433 	case MC_SEQ_WR_CTL_2 >> 2:
4434 		*out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
4435 		break;
4436 	default:
4437 		result = false;
4438 		break;
4439 	}
4440 
4441 	return result;
4442 }
4443 
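/*
 * Mark only those registers whose value actually differs between at least
 * two AC timing entries; registers that never change are skipped when the
 * table is packed for the SMC.
 */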
4444 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4445 {
4446 	u8 i, j;
4447 
4448 	for (i = 0; i < table->last; i++) {
4449 		for (j = 1; j < table->num_entries; j++) {
4450 			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4451 			    table->mc_reg_table_entry[j].mc_data[i]) {
4452 				table->valid_flag |= 1 << i;
4453 				break;
4454 			}
4455 		}
4456 	}
4457 }
4458 
4459 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4460 {
4461 	u32 i;
4462 	u16 address;
4463 
4464 	for (i = 0; i < table->last; i++) {
4465 		table->mc_reg_address[i].s0 =
4466 			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4467 			address : table->mc_reg_address[i].s1;
4468 	}
4469 }
4470 
4471 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4472 				      struct ci_mc_reg_table *ci_table)
4473 {
4474 	u8 i, j;
4475 
4476 	if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4477 		return -EINVAL;
4478 	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4479 		return -EINVAL;
4480 
4481 	for (i = 0; i < table->last; i++)
4482 		ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4483 
4484 	ci_table->last = table->last;
4485 
4486 	for (i = 0; i < table->num_entries; i++) {
4487 		ci_table->mc_reg_table_entry[i].mclk_max =
4488 			table->mc_reg_table_entry[i].mclk_max;
4489 		for (j = 0; j < table->last; j++)
4490 			ci_table->mc_reg_table_entry[i].mc_data[j] =
4491 				table->mc_reg_table_entry[i].mc_data[j];
4492 	}
4493 	ci_table->num_entries = table->num_entries;
4494 
4495 	return 0;
4496 }
4497 
4498 static int ci_register_patching_mc_seq(struct radeon_device *rdev,
4499 				       struct ci_mc_reg_table *table)
4500 {
4501 	u8 i, k;
4502 	u32 tmp;
4503 	bool patch;
4504 
4505 	tmp = RREG32(MC_SEQ_MISC0);
4506 	patch = (tmp & 0x00000f00) == 0x300;
4507 
4508 	if (patch &&
4509 	    ((rdev->pdev->device == 0x67B0) ||
4510 	     (rdev->pdev->device == 0x67B1))) {
4511 		for (i = 0; i < table->last; i++) {
4512 			if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4513 				return -EINVAL;
			/* s1 holds a dword address; the case labels below are
			 * byte addresses, so convert with << 2 as in
			 * ci_set_mc_special_registers()
			 */
4514 			switch (table->mc_reg_address[i].s1 << 2) {
4515 			case MC_SEQ_MISC1:
4516 				for (k = 0; k < table->num_entries; k++) {
4517 					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4518 					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4519 						table->mc_reg_table_entry[k].mc_data[i] =
4520 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4521 							0x00000007;
4522 				}
4523 				break;
4524 			case MC_SEQ_WR_CTL_D0:
4525 				for (k = 0; k < table->num_entries; k++) {
4526 					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4527 					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4528 						table->mc_reg_table_entry[k].mc_data[i] =
4529 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4530 							0x0000D0DD;
4531 				}
4532 				break;
4533 			case MC_SEQ_WR_CTL_D1:
4534 				for (k = 0; k < table->num_entries; k++) {
4535 					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4536 					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4537 						table->mc_reg_table_entry[k].mc_data[i] =
4538 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4539 							0x0000D0DD;
4540 				}
4541 				break;
4542 			case MC_SEQ_WR_CTL_2:
4543 				for (k = 0; k < table->num_entries; k++) {
4544 					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4545 					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4546 						table->mc_reg_table_entry[k].mc_data[i] = 0;
4547 				}
4548 				break;
4549 			case MC_SEQ_CAS_TIMING:
4550 				for (k = 0; k < table->num_entries; k++) {
4551 					if (table->mc_reg_table_entry[k].mclk_max == 125000)
4552 						table->mc_reg_table_entry[k].mc_data[i] =
4553 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4554 							0x000C0140;
4555 					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4556 						table->mc_reg_table_entry[k].mc_data[i] =
4557 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4558 							0x000C0150;
4559 				}
4560 				break;
4561 			case MC_SEQ_MISC_TIMING:
4562 				for (k = 0; k < table->num_entries; k++) {
4563 					if (table->mc_reg_table_entry[k].mclk_max == 125000)
4564 						table->mc_reg_table_entry[k].mc_data[i] =
4565 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4566 							0x00000030;
4567 					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4568 						table->mc_reg_table_entry[k].mc_data[i] =
4569 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4570 							0x00000035;
4571 				}
4572 				break;
4573 			default:
4574 				break;
4575 			}
4576 		}
4577 
4578 		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
4579 		tmp = RREG32(MC_SEQ_IO_DEBUG_DATA);
4580 		tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4581 		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
4582 		WREG32(MC_SEQ_IO_DEBUG_DATA, tmp);
4583 	}
4584 
4585 	return 0;
4586 }
4587 
4588 static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
4589 {
4590 	struct ci_power_info *pi = ci_get_pi(rdev);
4591 	struct atom_mc_reg_table *table;
4592 	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4593 	u8 module_index = rv770_get_memory_module_index(rdev);
4594 	int ret;
4595 
4596 	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4597 	if (!table)
4598 		return -ENOMEM;
4599 
4600 	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
4601 	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
4602 	WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
4603 	WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
4604 	WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
4605 	WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
4606 	WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
4607 	WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
4608 	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
4609 	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
4610 	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
4611 	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
4612 	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
4613 	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
4614 	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
4615 	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
4616 	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
4617 	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
4618 	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
4619 	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
4620 
4621 	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
4622 	if (ret)
4623 		goto init_mc_done;
4624 
4625 	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4626 	if (ret)
4627 		goto init_mc_done;
4628 
4629 	ci_set_s0_mc_reg_index(ci_table);
4630 
4631 	ret = ci_register_patching_mc_seq(rdev, ci_table);
4632 	if (ret)
4633 		goto init_mc_done;
4634 
4635 	ret = ci_set_mc_special_registers(rdev, ci_table);
4636 	if (ret)
4637 		goto init_mc_done;
4638 
4639 	ci_set_valid_flag(ci_table);
4640 
4641 init_mc_done:
4642 	kfree(table);
4643 
4644 	return ret;
4645 }
4646 
4647 static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
4648 					SMU7_Discrete_MCRegisters *mc_reg_table)
4649 {
4650 	struct ci_power_info *pi = ci_get_pi(rdev);
4651 	u32 i, j;
4652 
4653 	for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4654 		if (pi->mc_reg_table.valid_flag & (1 << j)) {
4655 			if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4656 				return -EINVAL;
4657 			mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4658 			mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4659 			i++;
4660 		}
4661 	}
4662 
4663 	mc_reg_table->last = (u8)i;
4664 
4665 	return 0;
4666 }
4667 
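/*
 * Pack one register-set entry for the SMC, keeping only registers flagged
 * in valid_flag.  As with the rest of the tables in this file, values are
 * byte-swapped with cpu_to_be32() because the SMC expects big-endian data.
 */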
4668 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4669 				    SMU7_Discrete_MCRegisterSet *data,
4670 				    u32 num_entries, u32 valid_flag)
4671 {
4672 	u32 i, j;
4673 
4674 	for (i = 0, j = 0; j < num_entries; j++) {
4675 		if (valid_flag & (1 << j)) {
4676 			data->value[i] = cpu_to_be32(entry->mc_data[j]);
4677 			i++;
4678 		}
4679 	}
4680 }
4681 
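/*
 * Pick the first AC timing entry whose mclk_max covers @memory_clock,
 * clamping to the last entry when the requested clock is above every
 * mclk_max in the table.
 */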
4682 static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
4683 						 const u32 memory_clock,
4684 						 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4685 {
4686 	struct ci_power_info *pi = ci_get_pi(rdev);
4687 	u32 i = 0;
4688 
4689 	for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
4690 		if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4691 			break;
4692 	}
4693 
4694 	if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4695 		--i;
4696 
4697 	ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4698 				mc_reg_table_data, pi->mc_reg_table.last,
4699 				pi->mc_reg_table.valid_flag);
4700 }
4701 
4702 static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
4703 					   SMU7_Discrete_MCRegisters *mc_reg_table)
4704 {
4705 	struct ci_power_info *pi = ci_get_pi(rdev);
4706 	u32 i;
4707 
4708 	for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4709 		ci_convert_mc_reg_table_entry_to_smc(rdev,
4710 						     pi->dpm_table.mclk_table.dpm_levels[i].value,
4711 						     &mc_reg_table->data[i]);
4712 }
4713 
4714 static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
4715 {
4716 	struct ci_power_info *pi = ci_get_pi(rdev);
4717 	int ret;
4718 
4719 	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4720 
4721 	ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
4722 	if (ret)
4723 		return ret;
4724 	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4725 
4726 	return ci_copy_bytes_to_smc(rdev,
4727 				    pi->mc_reg_table_start,
4728 				    (u8 *)&pi->smc_mc_reg_table,
4729 				    sizeof(SMU7_Discrete_MCRegisters),
4730 				    pi->sram_end);
4731 }
4732 
4733 static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
4734 {
4735 	struct ci_power_info *pi = ci_get_pi(rdev);
4736 
4737 	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4738 		return 0;
4739 
4740 	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4741 
4742 	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4743 
4744 	return ci_copy_bytes_to_smc(rdev,
4745 				    pi->mc_reg_table_start +
4746 				    offsetof(SMU7_Discrete_MCRegisters, data[0]),
4747 				    (u8 *)&pi->smc_mc_reg_table.data[0],
4748 				    sizeof(SMU7_Discrete_MCRegisterSet) *
4749 				    pi->dpm_table.mclk_table.count,
4750 				    pi->sram_end);
4751 }
4752 
4753 static void ci_enable_voltage_control(struct radeon_device *rdev)
4754 {
4755 	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
4756 
4757 	tmp |= VOLT_PWRMGT_EN;
4758 	WREG32_SMC(GENERAL_PWRMGT, tmp);
4759 }
4760 
4761 static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
4762 						      struct radeon_ps *radeon_state)
4763 {
4764 	struct ci_ps *state = ci_get_ps(radeon_state);
4765 	int i;
4766 	u16 pcie_speed, max_speed = 0;
4767 
4768 	for (i = 0; i < state->performance_level_count; i++) {
4769 		pcie_speed = state->performance_levels[i].pcie_gen;
4770 		if (max_speed < pcie_speed)
4771 			max_speed = pcie_speed;
4772 	}
4773 
4774 	return max_speed;
4775 }
4776 
4777 static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
4778 {
4779 	u32 speed_cntl = 0;
4780 
4781 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
4782 	speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
4783 
4784 	return (u16)speed_cntl;
4785 }
4786 
4787 static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
4788 {
4789 	u32 link_width = 0;
4790 
4791 	link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
4792 	link_width >>= LC_LINK_WIDTH_RD_SHIFT;
4793 
4794 	switch (link_width) {
4795 	case RADEON_PCIE_LC_LINK_WIDTH_X1:
4796 		return 1;
4797 	case RADEON_PCIE_LC_LINK_WIDTH_X2:
4798 		return 2;
4799 	case RADEON_PCIE_LC_LINK_WIDTH_X4:
4800 		return 4;
4801 	case RADEON_PCIE_LC_LINK_WIDTH_X8:
4802 		return 8;
4803 	case RADEON_PCIE_LC_LINK_WIDTH_X12:
4804 		/* not actually supported */
4805 		return 12;
4806 	case RADEON_PCIE_LC_LINK_WIDTH_X0:
4807 	case RADEON_PCIE_LC_LINK_WIDTH_X16:
4808 	default:
4809 		return 16;
4810 	}
4811 }
4812 
4813 static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
4814 							     struct radeon_ps *radeon_new_state,
4815 							     struct radeon_ps *radeon_current_state)
4816 {
4817 	struct ci_power_info *pi = ci_get_pi(rdev);
4818 	enum radeon_pcie_gen target_link_speed =
4819 		ci_get_maximum_link_speed(rdev, radeon_new_state);
4820 	enum radeon_pcie_gen current_link_speed;
4821 
4822 	if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
4823 		current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
4824 	else
4825 		current_link_speed = pi->force_pcie_gen;
4826 
4827 	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
4828 	pi->pspp_notify_required = false;
4829 	if (target_link_speed > current_link_speed) {
4830 		switch (target_link_speed) {
4831 #ifdef CONFIG_ACPI
4832 		case RADEON_PCIE_GEN3:
4833 			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
4834 				break;
4835 			pi->force_pcie_gen = RADEON_PCIE_GEN2;
4836 			if (current_link_speed == RADEON_PCIE_GEN2)
4837 				break;
4838 			fallthrough;
4839 		case RADEON_PCIE_GEN2:
4840 			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
4841 				break;
4842 			fallthrough;
4843 #endif
4844 		default:
4845 			pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
4846 			break;
4847 		}
4848 	} else {
4849 		if (target_link_speed < current_link_speed)
4850 			pi->pspp_notify_required = true;
4851 	}
4852 }
4853 
4854 static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
4855 							   struct radeon_ps *radeon_new_state,
4856 							   struct radeon_ps *radeon_current_state)
4857 {
4858 	struct ci_power_info *pi = ci_get_pi(rdev);
4859 	enum radeon_pcie_gen target_link_speed =
4860 		ci_get_maximum_link_speed(rdev, radeon_new_state);
4861 	u8 request;
4862 
4863 	if (pi->pspp_notify_required) {
4864 		if (target_link_speed == RADEON_PCIE_GEN3)
4865 			request = PCIE_PERF_REQ_PECI_GEN3;
4866 		else if (target_link_speed == RADEON_PCIE_GEN2)
4867 			request = PCIE_PERF_REQ_PECI_GEN2;
4868 		else
4869 			request = PCIE_PERF_REQ_PECI_GEN1;
4870 
4871 		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
4872 		    (ci_get_current_pcie_speed(rdev) > 0))
4873 			return;
4874 
4875 #ifdef CONFIG_ACPI
4876 		radeon_acpi_pcie_performance_request(rdev, request, false);
4877 #endif
4878 	}
4879 }
4880 
4881 static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4882 {
4883 	struct ci_power_info *pi = ci_get_pi(rdev);
4884 	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4885 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4886 	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4887 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4888 	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4889 		&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4890 
4891 	if (allowed_sclk_vddc_table == NULL)
4892 		return -EINVAL;
4893 	if (allowed_sclk_vddc_table->count < 1)
4894 		return -EINVAL;
4895 	if (allowed_mclk_vddc_table == NULL)
4896 		return -EINVAL;
4897 	if (allowed_mclk_vddc_table->count < 1)
4898 		return -EINVAL;
4899 	if (allowed_mclk_vddci_table == NULL)
4900 		return -EINVAL;
4901 	if (allowed_mclk_vddci_table->count < 1)
4902 		return -EINVAL;
4903 
4904 	pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4905 	pi->max_vddc_in_pp_table =
4906 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4907 
4908 	pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4909 	pi->max_vddci_in_pp_table =
4910 		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4911 
4912 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4913 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4914 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
4915 		allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4916 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4917 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4918 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4919 		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4920 
4921 	return 0;
4922 }
4923 
static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
	u32 leakage_index;

	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
		if (leakage_table->leakage_id[leakage_index] == *vddc) {
			*vddc = leakage_table->actual_voltage[leakage_index];
			break;
		}
	}
}

static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
	u32 leakage_index;

	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
		if (leakage_table->leakage_id[leakage_index] == *vddci) {
			*vddci = leakage_table->actual_voltage[leakage_index];
			break;
		}
	}
}

static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
								      struct radeon_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
	}
}

static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
								       struct radeon_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
	}
}

static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
									  struct radeon_vce_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
	}
}

static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
									  struct radeon_uvd_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
	}
}

static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
								   struct radeon_phase_shedding_limits_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
	}
}

static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
							    struct radeon_clock_and_voltage_limits *table)
{
	if (table) {
		ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
		ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
	}
}

static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
							 struct radeon_cac_leakage_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
	}
}

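/* Apply the leakage fixups above to every dependency table we use. */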
static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
{
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
								   &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								      &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								      &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
							       &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
	ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
						     &rdev->pm.dpm.dyn_state.cac_leakage_table);
}

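/* Cache whether the board uses GDDR5, based on MC_SEQ_MISC0. */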
static void ci_get_memory_type(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	tmp = RREG32(MC_SEQ_MISC0);

	if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
	    MC_SEQ_MISC0_GDDR5_VALUE)
		pi->mem_gddr5 = true;
	else
		pi->mem_gddr5 = false;
}

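/*
 * Keep full private copies of the current/requested states; ps_priv is
 * repointed at the private copy so it stays valid across transitions.
 */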
static void ci_update_current_ps(struct radeon_device *rdev,
				 struct radeon_ps *rps)
{
	struct ci_ps *new_ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
}

static void ci_update_requested_ps(struct radeon_device *rdev,
				   struct radeon_ps *rps)
{
	struct ci_ps *new_ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
}

int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	ci_update_requested_ps(rdev, new_ps);

	ci_apply_state_adjust_rules(rdev, &pi->requested_rps);

	return 0;
}

void ci_dpm_post_set_power_state(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;

	ci_update_current_ps(rdev, new_ps);
}

void ci_dpm_setup_asic(struct radeon_device *rdev)
{
	int r;

	r = ci_mc_load_microcode(rdev);
	if (r)
		DRM_ERROR("Failed to load MC firmware!\n");
	ci_read_clock_registers(rdev);
	ci_get_memory_type(rdev);
	ci_enable_acpi_power_management(rdev);
	ci_init_sclk_t(rdev);
}

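/*
 * Bring up DPM: build the voltage and MC register tables, upload the
 * SMC firmware image and state tables, then start the SMC and enable
 * the optional features (ULV, deep sleep, DIDT, CAC, power containment,
 * thermal throttling).
 */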
int ci_dpm_enable(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
	int ret;

	if (ci_is_smc_running(rdev))
		return -EINVAL;
	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		ci_enable_voltage_control(rdev);
		ret = ci_construct_voltage_tables(rdev);
		if (ret) {
			DRM_ERROR("ci_construct_voltage_tables failed\n");
			return ret;
		}
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_initialize_mc_reg_table(rdev);
		if (ret)
			pi->caps_dynamic_ac_timing = false;
	}
	if (pi->dynamic_ss)
		ci_enable_spread_spectrum(rdev, true);
	if (pi->thermal_protection)
		ci_enable_thermal_protection(rdev, true);
	ci_program_sstp(rdev);
	ci_enable_display_gap(rdev);
	ci_program_vc(rdev);
	ret = ci_upload_firmware(rdev);
	if (ret) {
		DRM_ERROR("ci_upload_firmware failed\n");
		return ret;
	}
	ret = ci_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("ci_process_firmware_header failed\n");
		return ret;
	}
	ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
	if (ret) {
		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = ci_init_smc_table(rdev);
	if (ret) {
		DRM_ERROR("ci_init_smc_table failed\n");
		return ret;
	}
	ret = ci_init_arb_table_index(rdev);
	if (ret) {
		DRM_ERROR("ci_init_arb_table_index failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_populate_initial_mc_reg_table(rdev);
		if (ret) {
			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_populate_pm_base(rdev);
	if (ret) {
		DRM_ERROR("ci_populate_pm_base failed\n");
		return ret;
	}
	ci_dpm_start_smc(rdev);
	ci_enable_vr_hot_gpio_interrupt(rdev);
	ret = ci_notify_smc_display_change(rdev, false);
	if (ret) {
		DRM_ERROR("ci_notify_smc_display_change failed\n");
		return ret;
	}
	ci_enable_sclk_control(rdev, true);
	ret = ci_enable_ulv(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ulv failed\n");
		return ret;
	}
	ret = ci_enable_ds_master_switch(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ds_master_switch failed\n");
		return ret;
	}
	ret = ci_start_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_start_dpm failed\n");
		return ret;
	}
	ret = ci_enable_didt(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_didt failed\n");
		return ret;
	}
	ret = ci_enable_smc_cac(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_smc_cac failed\n");
		return ret;
	}
	ret = ci_enable_power_containment(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_power_containment failed\n");
		return ret;
	}

	ret = ci_power_control_set_level(rdev);
	if (ret) {
		DRM_ERROR("ci_power_control_set_level failed\n");
		return ret;
	}

	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	ret = ci_enable_thermal_based_sclk_dpm(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
		return ret;
	}

	ci_thermal_start_thermal_controller(rdev);

	ci_update_current_ps(rdev, boot_ps);

	return 0;
}

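/* Update the thermal alert thresholds with the alert interrupt masked. */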
static int ci_set_temperature_range(struct radeon_device *rdev)
{
	int ret;

	ret = ci_thermal_enable_alert(rdev, false);
	if (ret)
		return ret;
	ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = ci_thermal_enable_alert(rdev, true);
	if (ret)
		return ret;

	return ret;
}

int ci_dpm_late_enable(struct radeon_device *rdev)
{
	int ret;

	ret = ci_set_temperature_range(rdev);
	if (ret)
		return ret;

	ci_dpm_powergate_uvd(rdev, true);

	return 0;
}

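/* Tear down DPM and return the hardware to its boot power state. */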
void ci_dpm_disable(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;

	ci_dpm_powergate_uvd(rdev, false);

	if (!ci_is_smc_running(rdev))
		return;

	ci_thermal_stop_thermal_controller(rdev);

	if (pi->thermal_protection)
		ci_enable_thermal_protection(rdev, false);
	ci_enable_power_containment(rdev, false);
	ci_enable_smc_cac(rdev, false);
	ci_enable_didt(rdev, false);
	ci_enable_spread_spectrum(rdev, false);
	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
	ci_stop_dpm(rdev);
	ci_enable_ds_master_switch(rdev, false);
	ci_enable_ulv(rdev, false);
	ci_clear_vc(rdev);
	ci_reset_to_default(rdev);
	ci_dpm_stop_smc(rdev);
	ci_force_switch_to_arb_f0(rdev);
	ci_enable_thermal_based_sclk_dpm(rdev, false);

	ci_update_current_ps(rdev, boot_ps);
}

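/*
 * Switch to the requested power state: freeze the sclk/mclk DPM levels,
 * upload the new levels and enable mask, update the VCE and MC register
 * state, then unfreeze and let the SMC pick levels again.
 */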
int ci_dpm_set_power_state(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	struct radeon_ps *old_ps = &pi->current_rps;
	int ret;

	ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
	if (pi->pcie_performance_request)
		ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
	ret = ci_freeze_sclk_mclk_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
		return ret;
	}
	ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
		return ret;
	}

	ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
	if (ret) {
		DRM_ERROR("ci_update_vce_dpm failed\n");
		return ret;
	}

	ret = ci_update_sclk_t(rdev);
	if (ret) {
		DRM_ERROR("ci_update_sclk_t failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_update_and_upload_mc_reg_table(rdev);
		if (ret) {
			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_program_memory_timing_parameters(rdev);
	if (ret) {
		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
		return ret;
	}
	ret = ci_unfreeze_sclk_mclk_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_upload_dpm_level_enable_mask(rdev);
	if (ret) {
		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
		return ret;
	}
	if (pi->pcie_performance_request)
		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);

	return 0;
}

#if 0
void ci_dpm_reset_asic(struct radeon_device *rdev)
{
	ci_set_boot_state(rdev);
}
#endif

void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
{
	ci_program_display_gap(rdev);
}

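/* Unions overlaying the version-dependent ATOM powerplay table layouts. */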
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

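/*
 * Copy the classification flags and (on newer table revisions) the UVD
 * clocks out of the non-clock info, and remember the boot and UVD states.
 */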
static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
					  struct radeon_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
		rdev->pm.dpm.boot_ps = rps;
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}

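/*
 * Fill one performance level from the ATOM clock info, clamp the PCIe
 * gen/lane settings to what the system supports, and track the PCIe
 * ranges seen in battery and performance states.
 */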
static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl = &ps->performance_levels[index];

	ps->performance_level_count = index + 1;

	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;

	pl->pcie_gen = r600_get_pcie_gen_support(rdev,
						 pi->sys_pcie_mask,
						 pi->vbios_boot_state.pcie_gen_bootup_value,
						 clock_info->ci.ucPCIEGen);
	pl->pcie_lane = r600_get_pcie_lane_support(rdev,
						   pi->vbios_boot_state.pcie_lane_bootup_value,
						   le16_to_cpu(clock_info->ci.usPCIELane));

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
		pi->acpi_pcie_gen = pl->pcie_gen;

	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
		pi->ulv.supported = true;
		pi->ulv.pl = *pl;
		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
	}

	/* patch up boot state */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
	}

	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		pi->use_pcie_powersaving_levels = true;
		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
			pi->pcie_gen_powersaving.max = pl->pcie_gen;
		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
			pi->pcie_gen_powersaving.min = pl->pcie_gen;
		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
			pi->pcie_lane_powersaving.max = pl->pcie_lane;
		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
			pi->pcie_lane_powersaving.min = pl->pcie_lane;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		pi->use_pcie_performance_levels = true;
		if (pi->pcie_gen_performance.max < pl->pcie_gen)
			pi->pcie_gen_performance.max = pl->pcie_gen;
		if (pi->pcie_gen_performance.min > pl->pcie_gen)
			pi->pcie_gen_performance.min = pl->pcie_gen;
		if (pi->pcie_lane_performance.max < pl->pcie_lane)
			pi->pcie_lane_performance.max = pl->pcie_lane;
		if (pi->pcie_lane_performance.min > pl->pcie_lane)
			pi->pcie_lane_performance.min = pl->pcie_lane;
		break;
	default:
		break;
	}
}

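/*
 * Walk the ATOM state array and build the radeon_ps/ci_ps arrays, then
 * mirror the clocks for the VCE states.
 */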
static int ci_parse_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct ci_ps *ps;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
				  sizeof(struct radeon_ps),
				  GFP_KERNEL);
	if (!rdev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	rdev->pm.dpm.num_ps = 0;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		if (!rdev->pm.power_state[i].clock_info)
			return -EINVAL;
		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
		if (ps == NULL)
			return -ENOMEM;
		rdev->pm.dpm.ps[i].ps_priv = ps;
		ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			ci_parse_pplib_clock_info(rdev,
						  &rdev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
		rdev->pm.dpm.num_ps = i + 1;
	}

	/* fill in the vce power states */
	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
		u32 sclk, mclk;
		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
		sclk |= clock_info->ci.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
		rdev->pm.dpm.vce_states[i].sclk = sclk;
		rdev->pm.dpm.vce_states[i].mclk = mclk;
	}

	return 0;
}

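/* Read the bootup voltages and clocks from the ATOM firmware info table. */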
static int ci_get_vbios_boot_values(struct radeon_device *rdev,
				    struct ci_vbios_boot_state *boot_state)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
	u8 frev, crev;
	u16 data_offset;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		firmware_info =
			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
						    data_offset);
		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);

		return 0;
	}
	return -EINVAL;
}

void ci_dpm_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->pm.dpm.num_ps; i++)
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	kfree(rdev->pm.dpm.ps);
	kfree(rdev->pm.dpm.priv);
	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
	r600_free_extended_power_table(rdev);
}

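/*
 * One-time DPM setup: allocate ci_power_info, size the PCIe speed mask
 * from the root port's speed cap, parse the vbios power tables, and set
 * the default caps, thresholds and voltage-control modes.
 */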
int ci_dpm_init(struct radeon_device *rdev)
{
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	SMU7_Discrete_DpmTable  *dpm_table;
	struct radeon_gpio_rec gpio;
	u16 data_offset, size;
	u8 frev, crev;
	struct ci_power_info *pi;
	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
	struct pci_dev *root = rdev->pdev->bus->self;
	int ret;

	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	if (!pci_is_root_bus(rdev->pdev->bus))
		speed_cap = pcie_get_speed_cap(root);
	if (speed_cap == PCI_SPEED_UNKNOWN) {
		pi->sys_pcie_mask = 0;
	} else {
		if (speed_cap == PCIE_SPEED_8_0GT)
			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
				RADEON_PCIE_SPEED_50 |
				RADEON_PCIE_SPEED_80;
		else if (speed_cap == PCIE_SPEED_5_0GT)
			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
				RADEON_PCIE_SPEED_50;
		else
			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25;
	}
	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;

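	/*
	 * Start the PCIe gen/lane ranges inverted (max low, min high) so
	 * the min/max tracking in ci_parse_pplib_clock_info can tighten
	 * them to the values actually seen in the power states.
	 */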
	pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
	pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
	pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
	pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;

	pi->pcie_lane_performance.max = 0;
	pi->pcie_lane_performance.min = 16;
	pi->pcie_lane_powersaving.max = 0;
	pi->pcie_lane_powersaving.min = 16;

	ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = r600_get_platform_caps(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = r600_parse_extended_power_table(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = ci_parse_power_table(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	pi->dll_default_on = false;
	pi->sram_end = SMC_RAM_END;

	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;

	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;

	pi->sclk_dpm_key_disabled = 0;
	pi->mclk_dpm_key_disabled = 0;
	pi->pcie_dpm_key_disabled = 0;
	pi->thermal_sclk_dpm_enabled = 0;

	/* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
	if ((rdev->pdev->device == 0x6658) &&
	    (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) {
		pi->mclk_dpm_key_disabled = 1;
	}

	pi->caps_sclk_ds = true;

	pi->mclk_strobe_mode_threshold = 40000;
	pi->mclk_stutter_mode_threshold = 40000;
	pi->mclk_edc_enable_threshold = 40000;
	pi->mclk_edc_wr_enable_threshold = 40000;

	ci_initialize_powertune_defaults(rdev);

	pi->caps_fps = false;

	pi->caps_sclk_throttle_low_notification = false;

	pi->caps_uvd_dpm = true;
	pi->caps_vce_dpm = true;

	ci_get_leakage_voltages(rdev);
	ci_patch_dependency_tables_with_leakage(rdev);
	ci_set_private_data_variables_based_on_pptable(rdev);

	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
		kcalloc(4,
			sizeof(struct radeon_clock_voltage_dependency_entry),
			GFP_KERNEL);
	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
		ci_dpm_fini(rdev);
		return -ENOMEM;
	}
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;

	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;

	rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
	rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;

	if (rdev->family == CHIP_HAWAII) {
		pi->thermal_temp_setting.temperature_low = 94500;
		pi->thermal_temp_setting.temperature_high = 95000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	} else {
		pi->thermal_temp_setting.temperature_low = 99500;
		pi->thermal_temp_setting.temperature_high = 100000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	}

	pi->uvd_enabled = false;

	dpm_table = &pi->smc_state_table;

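	/*
	 * Wire up the optional VRHot, AC/DC switch and PCC GPIOs if the
	 * vbios defines them, updating the platform caps to match.
	 */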
	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->VRHotGpio = gpio.shift;
		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	} else {
		dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	}

	gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->AcDcGpio = gpio.shift;
		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	} else {
		dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	}

	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID);
	if (gpio.valid) {
		u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL);

		switch (gpio.shift) {
		case 0:
			tmp &= ~GNB_SLOW_MODE_MASK;
			tmp |= GNB_SLOW_MODE(1);
			break;
		case 1:
			tmp &= ~GNB_SLOW_MODE_MASK;
			tmp |= GNB_SLOW_MODE(2);
			break;
		case 2:
			tmp |= GNB_SLOW;
			break;
		case 3:
			tmp |= FORCE_NB_PS1;
			break;
		case 4:
			tmp |= DPM_ENABLED;
			break;
		default:
			DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
			break;
		}
		WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
	}

	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
	else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
	}

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
	}

	pi->vddc_phase_shed_control = true;

#if defined(CONFIG_ACPI)
	pi->pcie_performance_request =
		radeon_acpi_is_pcie_performance_request_supported(rdev);
#else
	pi->pcie_performance_request = false;
#endif

	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		pi->caps_sclk_ss_support = true;
		pi->caps_mclk_ss_support = true;
		pi->dynamic_ss = true;
	} else {
		pi->caps_sclk_ss_support = false;
		pi->caps_mclk_ss_support = false;
		pi->dynamic_ss = true;
	}

	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	pi->caps_dynamic_ac_timing = true;

	pi->uvd_power_gated = false;

	/* make sure dc limits are valid */
	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	pi->fan_ctrl_is_in_default_mode = true;

	return 0;
}

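/* Report UVD/VCE activity and the current average clocks via debugfs. */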
void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						    struct seq_file *m)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *rps = &pi->current_rps;
	u32 sclk = ci_get_average_sclk_freq(rdev);
	u32 mclk = ci_get_average_mclk_freq(rdev);

	seq_printf(m, "uvd    %sabled\n", pi->uvd_enabled ? "en" : "dis");
	seq_printf(m, "vce    %sabled\n", rps->vce_active ? "en" : "dis");
	seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
		   sclk, mclk);
}

void ci_dpm_print_power_state(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl;
	int i;

	r600_dpm_print_class_info(rps->class, rps->class2);
	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->performance_level_count; i++) {
		pl = &ps->performance_levels[i];
		printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
	}
	r600_dpm_print_ps_status(rdev, rps);
}

u32 ci_dpm_get_current_sclk(struct radeon_device *rdev)
{
	u32 sclk = ci_get_average_sclk_freq(rdev);

	return sclk;
}

u32 ci_dpm_get_current_mclk(struct radeon_device *rdev)
{
	u32 mclk = ci_get_average_mclk_freq(rdev);

	return mclk;
}

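/*
 * For the sclk/mclk queries, "low" selects the first performance level
 * of the requested state and "high" the last.
 */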
u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].sclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}

u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].mclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}