/* xref: /linux/drivers/gpu/drm/radeon/ci_dpm.c (revision d91517839e5d95adc0cf4b28caa7af62a71de526) */
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "radeon.h"
#include "cikd.h"
#include "r600_dpm.h"
#include "ci_dpm.h"
#include "atom.h"
#include <linux/seq_file.h>

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define SMC_RAM_END 0x40000

#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1    625
#define VOLTAGE_VID_OFFSET_SCALE2    100

static const struct ci_pt_defaults defaults_hawaii_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x84,  0x0,   0x0,   0x7F,  0x0,   0x0,   0x5A,  0x60,  0x51,  0x8E,  0x79,  0x6B,  0x5F,  0x90,  0x79  },
	{ 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x93,  0x0,   0x0,   0x97,  0x0,   0x0,   0x6B,  0x60,  0x51,  0x95,  0x79,  0x6B,  0x5F,  0x90,  0x79  },
	{ 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_bonaire_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};

static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_saturn_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};

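/*
 * DIDT (di/dt, current-slew limiting) configuration for CI parts.  Each
 * entry is { offset, mask, shift, value, type }: the register at 'offset'
 * in the DIDT indirect space is read-modify-written so that the field
 * selected by 'mask' becomes 'value' << 'shift'.  An entry with offset
 * 0xFFFFFFFF terminates the table; see ci_program_pt_config_registers()
 * below for the interpreter.  The 0x1x/0x3x/0x5x/0x7x offset groups
 * presumably correspond to the SQ/DB/TD/TCP blocks that are toggled in
 * ci_do_enable_didt().
 */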
static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
							    u32 *max_clock);
extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
				       u32 arb_freq_src, u32 arb_freq_dest);
extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
						     u32 max_voltage_steps,
						     struct atom_voltage_table *voltage_table);
extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern int ci_mc_load_microcode(struct radeon_device *rdev);

static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp);
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);

static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
{
	struct ci_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
{
	struct ci_ps *ps = rps->ps_priv;

	return ps;
}

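/*
 * Pick the per-ASIC PowerTune defaults from the PCI device ID.  The
 * 0x665x IDs are Bonaire, 0x664x are Saturn (mobile Bonaire) and the
 * 0x67Ax/0x67Bx IDs are Hawaii variants; anything unrecognized falls
 * back to the Bonaire XT table.
 */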
static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	switch (rdev->pdev->device) {
	case 0x6650:
	case 0x6658:
	case 0x665C:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6651:
	case 0x665D:
		pi->powertune_defaults = &defaults_bonaire_pro;
		break;
	case 0x6640:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x6641:
		pi->powertune_defaults = &defaults_saturn_pro;
		break;
	case 0x67B8:
	case 0x67B0:
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
		pi->powertune_defaults = &defaults_hawaii_xt;
		break;
	case 0x67BA:
	case 0x67B1:
		pi->powertune_defaults = &defaults_hawaii_pro;
		break;
	}

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}

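/*
 * Convert a VDDC value in mV to an SVI2 VID code.  With VOLTAGE_SCALE = 4
 * the voltage is first expressed in 0.25 mV units, so this computes
 * (1.55 V - vddc) / 6.25 mV, which appears to be the standard SVI2
 * encoding.  For example, vddc = 1100 (mV) gives (6200 - 4400) / 25 = 72.
 */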
static u8 ci_convert_to_vid(u16 vddc)
{
	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}

static int ci_populate_vddc_vid(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *vid = pi->smc_powertune_table.VddCVid;
	u32 i;

	if (pi->vddc_voltage_table.count > 8)
		return -EINVAL;

	for (i = 0; i < pi->vddc_voltage_table.count; i++)
		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_populate_svi_load_line(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int ci_populate_tdc_limit(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}

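/*
 * "dw8" is the PmFuses dword holding TdcWaterfallCtl.  The SMC SRAM read
 * here seems intended mainly to validate that the fuse table is
 * reachable; on success the value is overwritten with the per-ASIC
 * default anyway.
 */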
332 {
333 	struct ci_power_info *pi = ci_get_pi(rdev);
334 	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
335 	int ret;
336 
337 	ret = ci_read_smc_sram_dword(rdev,
338 				     SMU7_FIRMWARE_HEADER_LOCATION +
339 				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
340 				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
341 				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
342 				     pi->sram_end);
343 	if (ret)
344 		return -EINVAL;
345 	else
346 		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
347 
348 	return 0;
349 }
350 
351 static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
352 {
353 	struct ci_power_info *pi = ci_get_pi(rdev);
354 	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
355 	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
356 	int i, min, max;
357 
358 	min = max = hi_vid[0];
359 	for (i = 0; i < 8; i++) {
360 		if (0 != hi_vid[i]) {
361 			if (min > hi_vid[i])
362 				min = hi_vid[i];
363 			if (max < hi_vid[i])
364 				max = hi_vid[i];
365 		}
366 
367 		if (0 != lo_vid[i]) {
368 			if (min > lo_vid[i])
369 				min = lo_vid[i];
370 			if (max < lo_vid[i])
371 				max = lo_vid[i];
372 		}
373 	}
374 
375 	if ((min == 0) || (max == 0))
376 		return -EINVAL;
377 	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
378 	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
379 
380 	return 0;
381 }
382 
static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 hi_sidd;
	u16 lo_sidd;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;

	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

	return 0;
}

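/*
 * Populate the BAPM (bidirectional application power management)
 * section of the SMC DPM table.  Power values appear to be passed to
 * the SMC in 8.8 fixed point, hence the "* 256" scaling: e.g. a 150 W
 * configurable TDP would be encoded as 38400.  Temperatures from the
 * thermal tables are in millidegrees C and are reduced to whole degrees
 * for the SMC.
 */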
static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

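/*
 * Write the assembled PmFuses image into SMC SRAM.  The fuse table
 * offset is discovered from the firmware header, the individual
 * populate helpers above fill in pi->smc_powertune_table, and the whole
 * structure is then copied to the SMC in one shot.
 */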
static int ci_populate_pm_base(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = ci_read_smc_sram_dword(rdev,
					     SMU7_FIRMWARE_HEADER_LOCATION +
					     offsetof(SMU7_Firmware_Header, PmFuseTable),
					     &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(rdev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(rdev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(rdev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(rdev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
					   (u8 *)&pi->smc_powertune_table,
					   sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}

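/*
 * Interpreter for ci_pt_config_reg tables such as didt_config_ci.
 * CISLANDS_CONFIGREG_CACHE entries only accumulate field values in
 * 'cache'; the next non-cache entry ORs the accumulated bits into its
 * own read-modify-write and resets the cache, which lets several fields
 * of one register be described by consecutive table entries.
 */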
static int ci_program_pt_config_registers(struct radeon_device *rdev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}

static int ci_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = ci_program_pt_config_registers(rdev, didt_config_ci);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		ci_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}

static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct radeon_cac_tdp_table *cac_tdp_table =
						rdev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(rdev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}

static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
			if (smc_result != PPSMC_Result_OK) {
				ret = -EINVAL;
				pi->cac_enabled = false;
			} else {
				pi->cac_enabled = true;
			}
		} else if (pi->cac_enabled) {
			ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int ci_power_control_set_level(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment &&
	    (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
		adjust_percent = adjust_polarity ?
			rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;
		target_tdp *= 256;

		ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
	}

	return ret;
}

void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	ci_update_uvd_dpm(rdev, gate);
}

bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;

	return vblank_time < switch_limit;
}

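/*
 * Clamp a requested power state to what the platform can sustain:
 * honor the AC/DC clock and voltage limits, the voltage dependency
 * tables, and, when multiple CRTCs are active or the vblank period is
 * too short for a stable MCLK change, pin all levels to the highest
 * MCLK so no mid-scanout memory reclocking is needed.
 */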
static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
	int i;

	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(rdev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (rdev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* limit clocks to max supported clocks based on voltage dependency tables */
	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
							&max_sclk_vddc);
	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
							&max_mclk_vddci);
	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
							&max_mclk_vddc);

	for (i = 0; i < ps->performance_level_count; i++) {
		if (max_sclk_vddc) {
			if (ps->performance_levels[i].sclk > max_sclk_vddc)
				ps->performance_levels[i].sclk = max_sclk_vddc;
		}
		if (max_mclk_vddci) {
			if (ps->performance_levels[i].mclk > max_mclk_vddci)
				ps->performance_levels[i].mclk = max_mclk_vddci;
		}
		if (max_mclk_vddc) {
			if (ps->performance_levels[i].mclk > max_mclk_vddc)
				ps->performance_levels[i].mclk = max_mclk_vddc;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}

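/*
 * Program the thermal interrupt thresholds.  min_temp/max_temp are in
 * millidegrees C and are clamped to the controller's 0-255 C range
 * before being written as whole degrees to CG_THERMAL_INT.
 */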
static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT);
	tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
	tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
		CI_DIG_THERM_INTL(low_temp / 1000);
	WREG32_SMC(CG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(CG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

	return 0;
}

#if 0
static int ci_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_read_smc_sram_dword(rdev,
				      pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif

static int ci_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_write_smc_sram_dword(rdev,
				       pi->soft_regs_start + reg_offset,
				       value, pi->sram_end);
}

static void ci_init_fps_limits(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		table->FpsHighT = cpu_to_be16(tmp);

		tmp = 30;
		table->FpsLowT = cpu_to_be16(tmp);
	}
}

static int ci_update_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;
	u32 low_sclk_interrupt_t = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = ci_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);

	}

	return ret;
}

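/*
 * Resolve leakage ("virtual") voltage IDs to real voltages.  ATOM
 * dependency tables may contain ATOM_VIRTUAL_VOLTAGE_ID0 + n instead of
 * a millivolt value; the VBIOS is queried here for the actual VDDC and
 * VDDCI behind each ID so the pairs can be patched into the dependency
 * tables later.
 */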
static void ci_get_leakage_voltages(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
										 virtual_voltage_id,
										 leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}

static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	bool want_thermal_protection;
	enum radeon_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(CG_THERMAL_CTRL);
		tmp &= DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~THERMAL_PROTECTION_DIS;
		else
			tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}

static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
					   enum radeon_dpm_auto_throttle_src source,
					   bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (enable) {
		if (!(pi->active_auto_throttle_sources & (1 << source))) {
			pi->active_auto_throttle_sources |= 1 << source;
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	} else {
		if (pi->active_auto_throttle_sources & (1 << source)) {
			pi->active_auto_throttle_sources &= ~(1 << source);
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	}
}

static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
{
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
}

static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->need_update_smu7_dpm_table = 0;
	return 0;
}

static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);

			WREG32_SMC(LCAC_MC0_CNTL, 0x05);
			WREG32_SMC(LCAC_MC1_CNTL, 0x05);
			WREG32_SMC(LCAC_CPL_CNTL, 0x100005);

			udelay(10);

			WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

static int ci_start_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(rdev, true);
	if (ret)
		return ret;

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_stop_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp &= ~GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	ret = ci_enable_sclk_mclk_dpm(rdev, false);
	if (ret)
		return ret;

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}

static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);

	if (enable)
		tmp &= ~SCLK_PWRMGT_OFF;
	else
		tmp |= SCLK_PWRMGT_OFF;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
}

#if 0
static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

	ci_set_power_limit(rdev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
		else
			ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
#endif

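/*
 * SMC mailbox helpers.  The message protocol appears to be: write any
 * argument to SMC_MSG_ARG_0, post the message ID (ci_send_msg_to_smc()
 * writes SMC_MESSAGE_0 and polls SMC_RESP_0 for completion), then read
 * any result back from SMC_MSG_ARG_0.
 */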
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_MSG_ARG_0, parameter);
	return ci_send_msg_to_smc(rdev, msg);
}

static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
							PPSMC_Msg msg, u32 *parameter)
{
	PPSMC_Result smc_result;

	smc_result = ci_send_msg_to_smc(rdev, msg);

	if ((smc_result == PPSMC_Result_OK) && parameter)
		*parameter = RREG32(SMC_MSG_ARG_0);

	return smc_result;
}

static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->sclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->mclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->pcie_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp)
{
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;
	return 0;
}

static int ci_set_boot_state(struct radeon_device *rdev)
{
	return ci_enable_sclk_mclk_dpm(rdev, false);
}

static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
{
	u32 sclk_freq;
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_return_parameter(rdev,
						    PPSMC_MSG_API_GetSclkFrequency,
						    &sclk_freq);
	if (smc_result != PPSMC_Result_OK)
		sclk_freq = 0;

	return sclk_freq;
}

static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
{
	u32 mclk_freq;
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_return_parameter(rdev,
						    PPSMC_MSG_API_GetMclkFrequency,
						    &mclk_freq);
	if (smc_result != PPSMC_Result_OK)
		mclk_freq = 0;

	return mclk_freq;
}

static void ci_dpm_start_smc(struct radeon_device *rdev)
{
	int i;

	ci_program_jump_on_start(rdev);
	ci_start_smc_clock(rdev);
	ci_start_smc(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
			break;
	}
}

static void ci_dpm_stop_smc(struct radeon_device *rdev)
{
	ci_reset_smc(rdev);
	ci_stop_smc_clock(rdev);
}

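/*
 * The SMC firmware header (at SMU7_FIRMWARE_HEADER_LOCATION in SMC
 * SRAM) publishes the offsets of the shared tables; cache them so the
 * DPM, soft-register, MC register, fan and arbiter tables can be
 * addressed later without re-reading the header.
 */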
static int ci_process_firmware_header(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, DpmTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->dpm_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, SoftRegisters),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->soft_regs_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, mcRegisterTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->mc_reg_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, FanTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->fan_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->arb_table_start = tmp;

	return 0;
}

static void ci_read_clock_registers(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->clock_registers.cg_spll_func_cntl =
		RREG32_SMC(CG_SPLL_FUNC_CNTL);
	pi->clock_registers.cg_spll_func_cntl_2 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
	pi->clock_registers.cg_spll_func_cntl_3 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
	pi->clock_registers.cg_spll_func_cntl_4 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
	pi->clock_registers.cg_spll_spread_spectrum =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
	pi->clock_registers.cg_spll_spread_spectrum_2 =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
	pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
	pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
	pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}

static void ci_init_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->low_sclk_interrupt_t = 0;
}

static void ci_enable_thermal_protection(struct radeon_device *rdev,
					 bool enable)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	if (enable)
		tmp &= ~THERMAL_PROTECTION_DIS;
	else
		tmp |= THERMAL_PROTECTION_DIS;
	WREG32_SMC(GENERAL_PWRMGT, tmp);
}

static void ci_enable_acpi_power_management(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	tmp |= STATIC_PM_EN;

	WREG32_SMC(GENERAL_PWRMGT, tmp);
}

#if 0
static int ci_enter_ulp_state(struct radeon_device *rdev)
{

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

	udelay(25000);

	return 0;
}

static int ci_exit_ulp_state(struct radeon_device *rdev)
{
	int i;

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

	udelay(7000);

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(SMC_RESP_0) == 1)
			break;
		udelay(1000);
	}

	return 0;
}
#endif

static int ci_notify_smc_display_change(struct radeon_device *rdev,
					bool has_display)
{
	PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;

	return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ?  0 : -EINVAL;
}

static int ci_enable_ds_master_switch(struct radeon_device *rdev,
				      bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (enable) {
		if (pi->caps_sclk_ds) {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
				return -EINVAL;
		} else {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	} else {
		if (pi->caps_sclk_ds) {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

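/*
 * Program the display-gap heuristics used to hide reclocking in vblank.
 * With the 500 us vblank fallback and a 60 Hz display this yields a
 * 16667 us frame, of which pre_vbi_time = 16667 - 200 - 500 us; the
 * value written to CG_DISPLAY_GAP_CNTL2 is that time converted to
 * reference-clock ticks (ref_clock appears to be in 10 kHz units, so
 * ref_clock / 100 is ticks per microsecond).
 */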
static void ci_program_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
	u32 pre_vbi_time_in_us;
	u32 frame_time_in_us;
	u32 ref_clock = rdev->clock.spll.reference_freq;
	u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);

	tmp &= ~DISP_GAP_MASK;
	if (rdev->pm.dpm.new_active_crtc_count > 0)
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
	else
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);

	if (refresh_rate == 0)
		refresh_rate = 60;
	if (vblank_time == 0xffffffff)
		vblank_time = 500;
	frame_time_in_us = 1000000 / refresh_rate;
	pre_vbi_time_in_us =
		frame_time_in_us - 200 - vblank_time;
	tmp = pre_vbi_time_in_us * (ref_clock / 100);

	WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));

	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
}

static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (enable) {
		if (pi->caps_sclk_ss_support) {
			tmp = RREG32_SMC(GENERAL_PWRMGT);
			tmp |= DYN_SPREAD_SPECTRUM_EN;
			WREG32_SMC(GENERAL_PWRMGT, tmp);
		}
	} else {
		tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
		tmp &= ~SSEN;
		WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp &= ~DYN_SPREAD_SPECTRUM_EN;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}

static void ci_program_sstp(struct radeon_device *rdev)
{
	WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
}

static void ci_enable_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);

	tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
	tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
		DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));

	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
}

static void ci_program_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
	WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
	WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
	WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
	WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
	WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
	WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
	WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
}

static void ci_clear_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, 0);
	WREG32_SMC(CG_FTV_1, 0);
	WREG32_SMC(CG_FTV_2, 0);
	WREG32_SMC(CG_FTV_3, 0);
	WREG32_SMC(CG_FTV_4, 0);
	WREG32_SMC(CG_FTV_5, 0);
	WREG32_SMC(CG_FTV_6, 0);
	WREG32_SMC(CG_FTV_7, 0);
}

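/*
 * (Re)load the SMC microcode: wait for any in-progress boot sequence to
 * finish, gate the SMC clock, hold the SMC in reset, then copy the
 * ucode image into SMC SRAM via ci_load_smc_ucode().
 */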
static int ci_upload_firmware(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int i, ret;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
			break;
	}
	WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);

	ci_stop_smc_clock(rdev);
	ci_reset_smc(rdev);

	ret = ci_load_smc_ucode(rdev, pi->sram_end);

	return ret;
}

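/*
 * For SVI2-controlled rails there is no GPIO lookup table in the VBIOS,
 * so synthesize an atom_voltage_table directly from the voltage levels
 * of a clock dependency table (one entry per supported voltage, no SMIO
 * mask or phase delay).
 */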
1654 static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
1655 				     struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
1656 				     struct atom_voltage_table *voltage_table)
1657 {
1658 	u32 i;
1659 
1660 	if (voltage_dependency_table == NULL)
1661 		return -EINVAL;
1662 
1663 	voltage_table->mask_low = 0;
1664 	voltage_table->phase_delay = 0;
1665 
1666 	voltage_table->count = voltage_dependency_table->count;
1667 	for (i = 0; i < voltage_table->count; i++) {
1668 		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
1669 		voltage_table->entries[i].smio_low = 0;
1670 	}
1671 
1672 	return 0;
1673 }
1674 
1675 static int ci_construct_voltage_tables(struct radeon_device *rdev)
1676 {
1677 	struct ci_power_info *pi = ci_get_pi(rdev);
1678 	int ret;
1679 
1680 	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1681 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
1682 						    VOLTAGE_OBJ_GPIO_LUT,
1683 						    &pi->vddc_voltage_table);
1684 		if (ret)
1685 			return ret;
1686 	} else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1687 		ret = ci_get_svi2_voltage_table(rdev,
1688 						&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
1689 						&pi->vddc_voltage_table);
1690 		if (ret)
1691 			return ret;
1692 	}
1693 
1694 	if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
1695 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
1696 							 &pi->vddc_voltage_table);
1697 
1698 	if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1699 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
1700 						    VOLTAGE_OBJ_GPIO_LUT,
1701 						    &pi->vddci_voltage_table);
1702 		if (ret)
1703 			return ret;
1704 	} else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1705 		ret = ci_get_svi2_voltage_table(rdev,
1706 						&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
1707 						&pi->vddci_voltage_table);
1708 		if (ret)
1709 			return ret;
1710 	}
1711 
1712 	if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
1713 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
1714 							 &pi->vddci_voltage_table);
1715 
1716 	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1717 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
1718 						    VOLTAGE_OBJ_GPIO_LUT,
1719 						    &pi->mvdd_voltage_table);
1720 		if (ret)
1721 			return ret;
1722 	} else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1723 		ret = ci_get_svi2_voltage_table(rdev,
1724 						&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
1725 						&pi->mvdd_voltage_table);
1726 		if (ret)
1727 			return ret;
1728 	}
1729 
1730 	if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
1731 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
1732 							 &pi->mvdd_voltage_table);
1733 
1734 	return 0;
1735 }
1736 
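/*
 * Convert one voltage table entry into the SMC's big-endian
 * representation.  If no standardized SIDD values can be derived from
 * the leakage tables, fall back to the raw voltage for both the high
 * and low SIDD fields.
 */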
1737 static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
1738 					  struct atom_voltage_table_entry *voltage_table,
1739 					  SMU7_Discrete_VoltageLevel *smc_voltage_table)
1740 {
1741 	int ret;
1742 
1743 	ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
1744 					    &smc_voltage_table->StdVoltageHiSidd,
1745 					    &smc_voltage_table->StdVoltageLoSidd);
1746 
1747 	if (ret) {
1748 		smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
1749 		smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
1750 	}
1751 
1752 	smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
1753 	smc_voltage_table->StdVoltageHiSidd =
1754 		cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
1755 	smc_voltage_table->StdVoltageLoSidd =
1756 		cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
1757 }
1758 
1759 static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
1760 				      SMU7_Discrete_DpmTable *table)
1761 {
1762 	struct ci_power_info *pi = ci_get_pi(rdev);
1763 	unsigned int count;
1764 
1765 	table->VddcLevelCount = pi->vddc_voltage_table.count;
1766 	for (count = 0; count < table->VddcLevelCount; count++) {
1767 		ci_populate_smc_voltage_table(rdev,
1768 					      &pi->vddc_voltage_table.entries[count],
1769 					      &table->VddcLevel[count]);
1770 
1771 		if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1772 			table->VddcLevel[count].Smio |=
1773 				pi->vddc_voltage_table.entries[count].smio_low;
1774 		else
1775 			table->VddcLevel[count].Smio = 0;
1776 	}
1777 	table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
1778 
1779 	return 0;
1780 }
1781 
1782 static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
1783 				       SMU7_Discrete_DpmTable *table)
1784 {
1785 	unsigned int count;
1786 	struct ci_power_info *pi = ci_get_pi(rdev);
1787 
1788 	table->VddciLevelCount = pi->vddci_voltage_table.count;
1789 	for (count = 0; count < table->VddciLevelCount; count++) {
1790 		ci_populate_smc_voltage_table(rdev,
1791 					      &pi->vddci_voltage_table.entries[count],
1792 					      &table->VddciLevel[count]);
1793 
1794 		if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1795 			table->VddciLevel[count].Smio |=
1796 				pi->vddci_voltage_table.entries[count].smio_low;
1797 		else
1798 			table->VddciLevel[count].Smio = 0;
1799 	}
1800 	table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
1801 
1802 	return 0;
1803 }
1804 
1805 static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
1806 				      SMU7_Discrete_DpmTable *table)
1807 {
1808 	struct ci_power_info *pi = ci_get_pi(rdev);
1809 	unsigned int count;
1810 
1811 	table->MvddLevelCount = pi->mvdd_voltage_table.count;
1812 	for (count = 0; count < table->MvddLevelCount; count++) {
1813 		ci_populate_smc_voltage_table(rdev,
1814 					      &pi->mvdd_voltage_table.entries[count],
1815 					      &table->MvddLevel[count]);
1816 
1817 		if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1818 			table->MvddLevel[count].Smio |=
1819 				pi->mvdd_voltage_table.entries[count].smio_low;
1820 		else
1821 			table->MvddLevel[count].Smio = 0;
1822 	}
1823 	table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
1824 
1825 	return 0;
1826 }
1827 
1828 static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
1829 					  SMU7_Discrete_DpmTable *table)
1830 {
1831 	int ret;
1832 
1833 	ret = ci_populate_smc_vddc_table(rdev, table);
1834 	if (ret)
1835 		return ret;
1836 
1837 	ret = ci_populate_smc_vddci_table(rdev, table);
1838 	if (ret)
1839 		return ret;
1840 
1841 	ret = ci_populate_smc_mvdd_table(rdev, table);
1842 	if (ret)
1843 		return ret;
1844 
1845 	return 0;
1846 }
1847 
1848 static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
1849 				  SMU7_Discrete_VoltageLevel *voltage)
1850 {
1851 	struct ci_power_info *pi = ci_get_pi(rdev);
1852 	u32 i = 0;
1853 
1854 	if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
1855 		for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
1856 			if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
1857 				voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
1858 				break;
1859 			}
1860 		}
1861 
		if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
			return -EINVAL;

		/* A matching entry was found and voltage->Voltage is valid. */
		return 0;
	}

	/* MVDD is not controlled; the caller falls back to a default. */
	return -EINVAL;
1867 }
1868 
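/*
 * Derive "standard" high/low SIDD voltages for an entry from the CAC
 * leakage table, matching the entry against the sclk/vddc dependency
 * table: first by exact voltage, then by the first entry at or above
 * it.  The index is clamped to the leakage table size; without leakage
 * data the raw voltage is used for both values.
 */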
1869 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
1870 					 struct atom_voltage_table_entry *voltage_table,
1871 					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
1872 {
1873 	u16 v_index, idx;
1874 	bool voltage_found = false;
1875 	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
1876 	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
1877 
1878 	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
1879 		return -EINVAL;
1880 
1881 	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
1882 		for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1883 			if (voltage_table->value ==
1884 			    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1885 				voltage_found = true;
1886 				if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1887 					idx = v_index;
1888 				else
1889 					idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1890 				*std_voltage_lo_sidd =
1891 					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1892 				*std_voltage_hi_sidd =
1893 					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1894 				break;
1895 			}
1896 		}
1897 
1898 		if (!voltage_found) {
1899 			for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1900 				if (voltage_table->value <=
1901 				    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1902 					voltage_found = true;
1903 					if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1904 						idx = v_index;
1905 					else
1906 						idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1907 					*std_voltage_lo_sidd =
1908 						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1909 					*std_voltage_hi_sidd =
1910 						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1911 					break;
1912 				}
1913 			}
1914 		}
1915 	}
1916 
1917 	return 0;
1918 }
1919 
1920 static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
1921 						  const struct radeon_phase_shedding_limits_table *limits,
1922 						  u32 sclk,
1923 						  u32 *phase_shedding)
1924 {
1925 	unsigned int i;
1926 
1927 	*phase_shedding = 1;
1928 
1929 	for (i = 0; i < limits->count; i++) {
1930 		if (sclk < limits->entries[i].sclk) {
1931 			*phase_shedding = i;
1932 			break;
1933 		}
1934 	}
1935 }
1936 
1937 static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
1938 						  const struct radeon_phase_shedding_limits_table *limits,
1939 						  u32 mclk,
1940 						  u32 *phase_shedding)
1941 {
1942 	unsigned int i;
1943 
1944 	*phase_shedding = 1;
1945 
1946 	for (i = 0; i < limits->count; i++) {
1947 		if (mclk < limits->entries[i].mclk) {
1948 			*phase_shedding = i;
1949 			break;
1950 		}
1951 	}
1952 }
1953 
1954 static int ci_init_arb_table_index(struct radeon_device *rdev)
1955 {
1956 	struct ci_power_info *pi = ci_get_pi(rdev);
1957 	u32 tmp;
1958 	int ret;
1959 
1960 	ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
1961 				     &tmp, pi->sram_end);
1962 	if (ret)
1963 		return ret;
1964 
1965 	tmp &= 0x00FFFFFF;
1966 	tmp |= MC_CG_ARB_FREQ_F1 << 24;
1967 
1968 	return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
1969 				       tmp, pi->sram_end);
1970 }
1971 
1972 static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
1973 					 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
1974 					 u32 clock, u32 *voltage)
1975 {
1976 	u32 i = 0;
1977 
1978 	if (allowed_clock_voltage_table->count == 0)
1979 		return -EINVAL;
1980 
1981 	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
1982 		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
1983 			*voltage = allowed_clock_voltage_table->entries[i].v;
1984 			return 0;
1985 		}
1986 	}
1987 
1988 	*voltage = allowed_clock_voltage_table->entries[i-1].v;
1989 
1990 	return 0;
1991 }
1992 
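/*
 * Pick the deepest sleep divider ID such that sclk / (1 << id) stays at
 * or above the minimum engine clock; returns 0 (no division) if sclk is
 * already below the minimum.
 */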
1993 static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
1994 					     u32 sclk, u32 min_sclk_in_sr)
1995 {
1996 	u32 i;
1997 	u32 tmp;
1998 	u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
1999 		min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
2000 
2001 	if (sclk < min)
2002 		return 0;
2003 
	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
2005 		tmp = sclk / (1 << i);
2006 		if (tmp >= min || i == 0)
2007 			break;
2008 	}
2009 
2010 	return (u8)i;
2011 }
2012 
2013 static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
2014 {
2015 	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2016 }
2017 
2018 static int ci_reset_to_default(struct radeon_device *rdev)
2019 {
2020 	return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2021 		0 : -EINVAL;
2022 }
2023 
2024 static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
2025 {
2026 	u32 tmp;
2027 
2028 	tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
2029 
2030 	if (tmp == MC_CG_ARB_FREQ_F0)
2031 		return 0;
2032 
2033 	return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
2034 }
2035 
2036 static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
2037 						u32 sclk,
2038 						u32 mclk,
2039 						SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2040 {
2041 	u32 dram_timing;
2042 	u32 dram_timing2;
2043 	u32 burst_time;
2044 
2045 	radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
2046 
2047 	dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
2048 	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
2049 	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
2050 
2051 	arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2052 	arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2053 	arb_regs->McArbBurstTime = (u8)burst_time;
2054 
2055 	return 0;
2056 }
2057 
2058 static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
2059 {
2060 	struct ci_power_info *pi = ci_get_pi(rdev);
2061 	SMU7_Discrete_MCArbDramTimingTable arb_regs;
2062 	u32 i, j;
2063 	int ret =  0;
2064 
2065 	memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2066 
2067 	for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2068 		for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2069 			ret = ci_populate_memory_timing_parameters(rdev,
2070 								   pi->dpm_table.sclk_table.dpm_levels[i].value,
2071 								   pi->dpm_table.mclk_table.dpm_levels[j].value,
2072 								   &arb_regs.entries[i][j]);
2073 			if (ret)
2074 				break;
2075 		}
2076 	}
2077 
2078 	if (ret == 0)
2079 		ret = ci_copy_bytes_to_smc(rdev,
2080 					   pi->arb_table_start,
2081 					   (u8 *)&arb_regs,
2082 					   sizeof(SMU7_Discrete_MCArbDramTimingTable),
2083 					   pi->sram_end);
2084 
2085 	return ret;
2086 }
2087 
2088 static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2089 {
2090 	struct ci_power_info *pi = ci_get_pi(rdev);
2091 
2092 	if (pi->need_update_smu7_dpm_table == 0)
2093 		return 0;
2094 
2095 	return ci_do_program_memory_timing_parameters(rdev);
2096 }
2097 
2098 static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2099 					  struct radeon_ps *radeon_boot_state)
2100 {
2101 	struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2102 	struct ci_power_info *pi = ci_get_pi(rdev);
2103 	u32 level = 0;
2104 
2105 	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2106 		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2107 		    boot_state->performance_levels[0].sclk) {
2108 			pi->smc_state_table.GraphicsBootLevel = level;
2109 			break;
2110 		}
2111 	}
2112 
2113 	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2114 		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2115 		    boot_state->performance_levels[0].mclk) {
2116 			pi->smc_state_table.MemoryBootLevel = level;
2117 			break;
2118 		}
2119 	}
2120 }
2121 
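/* Build a bitmask of the enabled DPM levels; bit n is level n. */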
2122 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2123 {
2124 	u32 i;
2125 	u32 mask_value = 0;
2126 
	for (i = dpm_table->count; i > 0; i--) {
		mask_value <<= 1;
		if (dpm_table->dpm_levels[i - 1].enabled)
			mask_value |= 0x1;
	}
2134 
2135 	return mask_value;
2136 }
2137 
2138 static void ci_populate_smc_link_level(struct radeon_device *rdev,
2139 				       SMU7_Discrete_DpmTable *table)
2140 {
2141 	struct ci_power_info *pi = ci_get_pi(rdev);
2142 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
2143 	u32 i;
2144 
2145 	for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2146 		table->LinkLevel[i].PcieGenSpeed =
2147 			(u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2148 		table->LinkLevel[i].PcieLaneCount =
2149 			r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2150 		table->LinkLevel[i].EnabledForActivity = 1;
2151 		table->LinkLevel[i].DownT = cpu_to_be32(5);
2152 		table->LinkLevel[i].UpT = cpu_to_be32(30);
2153 	}
2154 
2155 	pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2156 	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2157 		ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2158 }
2159 
2160 static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
2161 				     SMU7_Discrete_DpmTable *table)
2162 {
2163 	u32 count;
2164 	struct atom_clock_dividers dividers;
2165 	int ret = -EINVAL;
2166 
2167 	table->UvdLevelCount =
2168 		rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2169 
2170 	for (count = 0; count < table->UvdLevelCount; count++) {
2171 		table->UvdLevel[count].VclkFrequency =
2172 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2173 		table->UvdLevel[count].DclkFrequency =
2174 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2175 		table->UvdLevel[count].MinVddc =
2176 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2177 		table->UvdLevel[count].MinVddcPhases = 1;
2178 
2179 		ret = radeon_atom_get_clock_dividers(rdev,
2180 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2181 						     table->UvdLevel[count].VclkFrequency, false, &dividers);
2182 		if (ret)
2183 			return ret;
2184 
2185 		table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2186 
2187 		ret = radeon_atom_get_clock_dividers(rdev,
2188 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2189 						     table->UvdLevel[count].DclkFrequency, false, &dividers);
2190 		if (ret)
2191 			return ret;
2192 
2193 		table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2194 
2195 		table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2196 		table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2197 		table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2198 	}
2199 
2200 	return ret;
2201 }
2202 
2203 static int ci_populate_smc_vce_level(struct radeon_device *rdev,
2204 				     SMU7_Discrete_DpmTable *table)
2205 {
2206 	u32 count;
2207 	struct atom_clock_dividers dividers;
2208 	int ret = -EINVAL;
2209 
2210 	table->VceLevelCount =
2211 		rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2212 
2213 	for (count = 0; count < table->VceLevelCount; count++) {
2214 		table->VceLevel[count].Frequency =
2215 			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2216 		table->VceLevel[count].MinVoltage =
2217 			(u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2218 		table->VceLevel[count].MinPhases = 1;
2219 
2220 		ret = radeon_atom_get_clock_dividers(rdev,
2221 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2222 						     table->VceLevel[count].Frequency, false, &dividers);
2223 		if (ret)
2224 			return ret;
2225 
2226 		table->VceLevel[count].Divider = (u8)dividers.post_divider;
2227 
2228 		table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2229 		table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2230 	}
2231 
2232 	return ret;
2234 }
2235 
2236 static int ci_populate_smc_acp_level(struct radeon_device *rdev,
2237 				     SMU7_Discrete_DpmTable *table)
2238 {
2239 	u32 count;
2240 	struct atom_clock_dividers dividers;
2241 	int ret = -EINVAL;
2242 
2243 	table->AcpLevelCount = (u8)
2244 		(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2245 
2246 	for (count = 0; count < table->AcpLevelCount; count++) {
2247 		table->AcpLevel[count].Frequency =
2248 			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
		table->AcpLevel[count].MinVoltage =
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2251 		table->AcpLevel[count].MinPhases = 1;
2252 
2253 		ret = radeon_atom_get_clock_dividers(rdev,
2254 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2255 						     table->AcpLevel[count].Frequency, false, &dividers);
2256 		if (ret)
2257 			return ret;
2258 
2259 		table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2260 
2261 		table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2262 		table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2263 	}
2264 
2265 	return ret;
2266 }
2267 
2268 static int ci_populate_smc_samu_level(struct radeon_device *rdev,
2269 				      SMU7_Discrete_DpmTable *table)
2270 {
2271 	u32 count;
2272 	struct atom_clock_dividers dividers;
2273 	int ret = -EINVAL;
2274 
2275 	table->SamuLevelCount =
2276 		rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2277 
2278 	for (count = 0; count < table->SamuLevelCount; count++) {
2279 		table->SamuLevel[count].Frequency =
2280 			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2281 		table->SamuLevel[count].MinVoltage =
2282 			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2283 		table->SamuLevel[count].MinPhases = 1;
2284 
2285 		ret = radeon_atom_get_clock_dividers(rdev,
2286 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2287 						     table->SamuLevel[count].Frequency, false, &dividers);
2288 		if (ret)
2289 			return ret;
2290 
2291 		table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2292 
2293 		table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2294 		table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2295 	}
2296 
2297 	return ret;
2298 }
2299 
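/*
 * Compute the MPLL register values for a memory clock: bandwidth
 * control and feedback/post dividers from the VBIOS, optional memory
 * spread spectrum (the nominal frequency is 4x the memory clock for
 * GDDR5, 2x otherwise), DLL speed, and the DLL power-down state.
 */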
2300 static int ci_calculate_mclk_params(struct radeon_device *rdev,
2301 				    u32 memory_clock,
2302 				    SMU7_Discrete_MemoryLevel *mclk,
2303 				    bool strobe_mode,
2304 				    bool dll_state_on)
2305 {
2306 	struct ci_power_info *pi = ci_get_pi(rdev);
2307 	u32  dll_cntl = pi->clock_registers.dll_cntl;
2308 	u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2309 	u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2310 	u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2311 	u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2312 	u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2313 	u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2314 	u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
2315 	u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
2316 	struct atom_mpll_param mpll_param;
2317 	int ret;
2318 
2319 	ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
2320 	if (ret)
2321 		return ret;
2322 
2323 	mpll_func_cntl &= ~BWCTRL_MASK;
2324 	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
2325 
2326 	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
2327 	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
2328 		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
2329 
2330 	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
2331 	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
2332 
2333 	if (pi->mem_gddr5) {
2334 		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
2335 		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
2336 			YCLK_POST_DIV(mpll_param.post_div);
2337 	}
2338 
2339 	if (pi->caps_mclk_ss_support) {
2340 		struct radeon_atom_ss ss;
2341 		u32 freq_nom;
2342 		u32 tmp;
2343 		u32 reference_clock = rdev->clock.mpll.reference_freq;
2344 
2345 		if (pi->mem_gddr5)
2346 			freq_nom = memory_clock * 4;
2347 		else
2348 			freq_nom = memory_clock * 2;
2349 
2350 		tmp = (freq_nom / reference_clock);
2351 		tmp = tmp * tmp;
2352 		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2353 						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2354 			u32 clks = reference_clock * 5 / ss.rate;
2355 			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2356 
2357 			mpll_ss1 &= ~CLKV_MASK;
2358 			mpll_ss1 |= CLKV(clkv);
2359 
2360 			mpll_ss2 &= ~CLKS_MASK;
2361 			mpll_ss2 |= CLKS(clks);
2362 		}
2363 	}
2364 
2365 	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
2366 	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
2367 
2368 	if (dll_state_on)
2369 		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
2370 	else
2371 		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2372 
2373 	mclk->MclkFrequency = memory_clock;
2374 	mclk->MpllFuncCntl = mpll_func_cntl;
2375 	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2376 	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2377 	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2378 	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2379 	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2380 	mclk->DllCntl = dll_cntl;
2381 	mclk->MpllSs1 = mpll_ss1;
2382 	mclk->MpllSs2 = mpll_ss2;
2383 
2384 	return 0;
2385 }
2386 
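/*
 * Fill in one SMC memory level: minimum voltages from the mclk
 * dependency tables, activity/hysteresis defaults, stutter/strobe/EDC
 * enables based on the configured thresholds, and the MPLL parameters,
 * all byte-swapped for the big-endian SMC.
 */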
2387 static int ci_populate_single_memory_level(struct radeon_device *rdev,
2388 					   u32 memory_clock,
2389 					   SMU7_Discrete_MemoryLevel *memory_level)
2390 {
2391 	struct ci_power_info *pi = ci_get_pi(rdev);
2392 	int ret;
2393 	bool dll_state_on;
2394 
2395 	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
2396 		ret = ci_get_dependency_volt_by_clk(rdev,
2397 						    &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2398 						    memory_clock, &memory_level->MinVddc);
2399 		if (ret)
2400 			return ret;
2401 	}
2402 
2403 	if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
2404 		ret = ci_get_dependency_volt_by_clk(rdev,
2405 						    &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2406 						    memory_clock, &memory_level->MinVddci);
2407 		if (ret)
2408 			return ret;
2409 	}
2410 
2411 	if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
2412 		ret = ci_get_dependency_volt_by_clk(rdev,
2413 						    &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2414 						    memory_clock, &memory_level->MinMvdd);
2415 		if (ret)
2416 			return ret;
2417 	}
2418 
2419 	memory_level->MinVddcPhases = 1;
2420 
2421 	if (pi->vddc_phase_shed_control)
2422 		ci_populate_phase_value_based_on_mclk(rdev,
2423 						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2424 						      memory_clock,
2425 						      &memory_level->MinVddcPhases);
2426 
2427 	memory_level->EnabledForThrottle = 1;
2428 	memory_level->EnabledForActivity = 1;
2429 	memory_level->UpH = 0;
2430 	memory_level->DownH = 100;
2431 	memory_level->VoltageDownH = 0;
2432 	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
2433 
2434 	memory_level->StutterEnable = false;
2435 	memory_level->StrobeEnable = false;
2436 	memory_level->EdcReadEnable = false;
2437 	memory_level->EdcWriteEnable = false;
2438 	memory_level->RttEnable = false;
2439 
2440 	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2441 
	if (pi->mclk_stutter_mode_threshold &&
	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
	    !pi->uvd_enabled &&
	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
	    (rdev->pm.dpm.new_active_crtc_count <= 2))
		memory_level->StutterEnable = true;

	if (pi->mclk_strobe_mode_threshold &&
	    (memory_clock <= pi->mclk_strobe_mode_threshold))
		memory_level->StrobeEnable = true;
2452 
2453 	if (pi->mem_gddr5) {
2454 		memory_level->StrobeRatio =
2455 			si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
2456 		if (pi->mclk_edc_enable_threshold &&
2457 		    (memory_clock > pi->mclk_edc_enable_threshold))
2458 			memory_level->EdcReadEnable = true;
2459 
2460 		if (pi->mclk_edc_wr_enable_threshold &&
2461 		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
2462 			memory_level->EdcWriteEnable = true;
2463 
		if (memory_level->StrobeEnable) {
			if (si_get_mclk_frequency_ratio(memory_clock, true) >=
			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
			else
				dll_state_on = (RREG32(MC_SEQ_MISC6) >> 1) & 0x1;
		} else {
			dll_state_on = pi->dll_default_on;
		}
	} else {
		memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
	}
2477 
2478 	ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
2479 	if (ret)
2480 		return ret;
2481 
2482 	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
2483 	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
2486 
2487 	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
2488 	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
2489 	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
2490 	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
2491 	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
2492 	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
2493 	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
2494 	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
2495 	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
2496 	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
2497 	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
2498 
2499 	return 0;
2500 }
2501 
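/*
 * Program the ACPI (lowest-power) level: power down and reset the SPLL,
 * switch the SCLK mux away from the PLL output (SCLK_MUX_SEL(4),
 * presumably a bypass/reference source), and put the memory DLLs into
 * reset/power-down so the state is safe with displays off.
 */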
2502 static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
2503 				      SMU7_Discrete_DpmTable *table)
2504 {
2505 	struct ci_power_info *pi = ci_get_pi(rdev);
2506 	struct atom_clock_dividers dividers;
2507 	SMU7_Discrete_VoltageLevel voltage_level;
2508 	u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
2509 	u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
2510 	u32 dll_cntl = pi->clock_registers.dll_cntl;
2511 	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2512 	int ret;
2513 
2514 	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
2515 
2516 	if (pi->acpi_vddc)
2517 		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
2518 	else
2519 		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
2520 
2521 	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
2522 
2523 	table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
2524 
2525 	ret = radeon_atom_get_clock_dividers(rdev,
2526 					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2527 					     table->ACPILevel.SclkFrequency, false, &dividers);
2528 	if (ret)
2529 		return ret;
2530 
2531 	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
2532 	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2533 	table->ACPILevel.DeepSleepDivId = 0;
2534 
2535 	spll_func_cntl &= ~SPLL_PWRON;
2536 	spll_func_cntl |= SPLL_RESET;
2537 
2538 	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
2539 	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
2540 
2541 	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
2542 	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
2543 	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
2544 	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
2545 	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
2546 	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2547 	table->ACPILevel.CcPwrDynRm = 0;
2548 	table->ACPILevel.CcPwrDynRm1 = 0;
2549 
2550 	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
2551 	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
2552 	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
2553 	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
2554 	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
2555 	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
2556 	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
2557 	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
2558 	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
2559 	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
2560 	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
2561 
2562 	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
2563 	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
2564 
2565 	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2566 		if (pi->acpi_vddci)
2567 			table->MemoryACPILevel.MinVddci =
2568 				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
2569 		else
2570 			table->MemoryACPILevel.MinVddci =
2571 				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
2572 	}
2573 
2574 	if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
2575 		table->MemoryACPILevel.MinMvdd = 0;
2576 	else
2577 		table->MemoryACPILevel.MinMvdd =
2578 			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
2579 
2580 	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
2581 	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2582 
2583 	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
2584 
2585 	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
2586 	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
2587 	table->MemoryACPILevel.MpllAdFuncCntl =
2588 		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
2589 	table->MemoryACPILevel.MpllDqFuncCntl =
2590 		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
2591 	table->MemoryACPILevel.MpllFuncCntl =
2592 		cpu_to_be32(pi->clock_registers.mpll_func_cntl);
2593 	table->MemoryACPILevel.MpllFuncCntl_1 =
2594 		cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
2595 	table->MemoryACPILevel.MpllFuncCntl_2 =
2596 		cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
2597 	table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
2598 	table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
2599 
2600 	table->MemoryACPILevel.EnabledForThrottle = 0;
2601 	table->MemoryACPILevel.EnabledForActivity = 0;
2602 	table->MemoryACPILevel.UpH = 0;
2603 	table->MemoryACPILevel.DownH = 100;
2604 	table->MemoryACPILevel.VoltageDownH = 0;
2605 	table->MemoryACPILevel.ActivityLevel =
2606 		cpu_to_be16((u16)pi->mclk_activity_target);
2607 
2608 	table->MemoryACPILevel.StutterEnable = false;
2609 	table->MemoryACPILevel.StrobeEnable = false;
2610 	table->MemoryACPILevel.EdcReadEnable = false;
2611 	table->MemoryACPILevel.EdcWriteEnable = false;
2612 	table->MemoryACPILevel.RttEnable = false;
2613 
2614 	return 0;
2615 }
2616 
2618 static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
2619 {
2620 	struct ci_power_info *pi = ci_get_pi(rdev);
2621 	struct ci_ulv_parm *ulv = &pi->ulv;
2622 
2623 	if (ulv->supported) {
2624 		if (enable)
2625 			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
2626 				0 : -EINVAL;
2627 		else
2628 			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
2629 				0 : -EINVAL;
2630 	}
2631 
2632 	return 0;
2633 }
2634 
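/*
 * Populate the ULV (ultra low voltage) state.  The ULV voltage is
 * expressed as an offset below the lowest sclk/vddc dependency entry,
 * in raw units for GPIO control or as a VID offset for SVI2; note that
 * it is carried, somewhat confusingly, in backbias_response_time.  A
 * zero value marks ULV as unsupported.
 */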
2635 static int ci_populate_ulv_level(struct radeon_device *rdev,
2636 				 SMU7_Discrete_Ulv *state)
2637 {
2638 	struct ci_power_info *pi = ci_get_pi(rdev);
2639 	u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
2640 
2641 	state->CcPwrDynRm = 0;
2642 	state->CcPwrDynRm1 = 0;
2643 
2644 	if (ulv_voltage == 0) {
2645 		pi->ulv.supported = false;
2646 		return 0;
2647 	}
2648 
2649 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2650 		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2651 			state->VddcOffset = 0;
2652 		else
2653 			state->VddcOffset =
2654 				rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
2655 	} else {
2656 		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2657 			state->VddcOffsetVid = 0;
2658 		else
2659 			state->VddcOffsetVid = (u8)
2660 				((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
2661 				 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2662 	}
2663 	state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
2664 
2665 	state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
2666 	state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
2667 	state->VddcOffset = cpu_to_be16(state->VddcOffset);
2668 
2669 	return 0;
2670 }
2671 
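/*
 * Compute the SPLL register values for an engine clock: the feedback
 * divider (with dithering enabled) plus optional engine spread spectrum
 * derived from the VCO frequency.
 */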
2672 static int ci_calculate_sclk_params(struct radeon_device *rdev,
2673 				    u32 engine_clock,
2674 				    SMU7_Discrete_GraphicsLevel *sclk)
2675 {
2676 	struct ci_power_info *pi = ci_get_pi(rdev);
2677 	struct atom_clock_dividers dividers;
2678 	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
2679 	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
2680 	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
2681 	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2682 	u32 reference_clock = rdev->clock.spll.reference_freq;
2683 	u32 reference_divider;
2684 	u32 fbdiv;
2685 	int ret;
2686 
2687 	ret = radeon_atom_get_clock_dividers(rdev,
2688 					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2689 					     engine_clock, false, &dividers);
2690 	if (ret)
2691 		return ret;
2692 
2693 	reference_divider = 1 + dividers.ref_div;
2694 	fbdiv = dividers.fb_div & 0x3FFFFFF;
2695 
2696 	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
2697 	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
	spll_func_cntl_3 |= SPLL_DITHEN;
2699 
2700 	if (pi->caps_sclk_ss_support) {
2701 		struct radeon_atom_ss ss;
2702 		u32 vco_freq = engine_clock * dividers.post_div;
2703 
2704 		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2705 						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
2706 			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
2707 			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
2708 
2709 			cg_spll_spread_spectrum &= ~CLK_S_MASK;
2710 			cg_spll_spread_spectrum |= CLK_S(clk_s);
2711 			cg_spll_spread_spectrum |= SSEN;
2712 
2713 			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
2714 			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
2715 		}
2716 	}
2717 
2718 	sclk->SclkFrequency = engine_clock;
2719 	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
2720 	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
2721 	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
2722 	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
2723 	sclk->SclkDid = (u8)dividers.post_divider;
2724 
2725 	return 0;
2726 }
2727 
2728 static int ci_populate_single_graphic_level(struct radeon_device *rdev,
2729 					    u32 engine_clock,
2730 					    u16 sclk_activity_level_t,
2731 					    SMU7_Discrete_GraphicsLevel *graphic_level)
2732 {
2733 	struct ci_power_info *pi = ci_get_pi(rdev);
2734 	int ret;
2735 
2736 	ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
2737 	if (ret)
2738 		return ret;
2739 
2740 	ret = ci_get_dependency_volt_by_clk(rdev,
2741 					    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2742 					    engine_clock, &graphic_level->MinVddc);
2743 	if (ret)
2744 		return ret;
2745 
2746 	graphic_level->SclkFrequency = engine_clock;
2747 
2748 	graphic_level->Flags =  0;
2749 	graphic_level->MinVddcPhases = 1;
2750 
2751 	if (pi->vddc_phase_shed_control)
2752 		ci_populate_phase_value_based_on_sclk(rdev,
2753 						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2754 						      engine_clock,
2755 						      &graphic_level->MinVddcPhases);
2756 
2757 	graphic_level->ActivityLevel = sclk_activity_level_t;
2758 
2759 	graphic_level->CcPwrDynRm = 0;
2760 	graphic_level->CcPwrDynRm1 = 0;
2761 	graphic_level->EnabledForActivity = 1;
2762 	graphic_level->EnabledForThrottle = 1;
2763 	graphic_level->UpH = 0;
2764 	graphic_level->DownH = 0;
2765 	graphic_level->VoltageDownH = 0;
2766 	graphic_level->PowerThrottle = 0;
2767 
2768 	if (pi->caps_sclk_ds)
2769 		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
2770 										   engine_clock,
2771 										   CISLAND_MINIMUM_ENGINE_CLOCK);
2772 
2773 	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2774 
2775 	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
2777 	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
2778 	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
2779 	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
2780 	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
2781 	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
2782 	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
2783 	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
2784 	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
2785 	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
2786 
2787 	return 0;
2788 }
2789 
2790 static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
2791 {
2792 	struct ci_power_info *pi = ci_get_pi(rdev);
2793 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
2794 	u32 level_array_address = pi->dpm_table_start +
2795 		offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2796 	u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
2797 		SMU7_MAX_LEVELS_GRAPHICS;
2798 	SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
	u32 i;
	int ret;
2800 
2801 	memset(levels, 0, level_array_size);
2802 
2803 	for (i = 0; i < dpm_table->sclk_table.count; i++) {
2804 		ret = ci_populate_single_graphic_level(rdev,
2805 						       dpm_table->sclk_table.dpm_levels[i].value,
2806 						       (u16)pi->activity_target[i],
2807 						       &pi->smc_state_table.GraphicsLevel[i]);
2808 		if (ret)
2809 			return ret;
2810 		if (i == (dpm_table->sclk_table.count - 1))
2811 			pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
2812 				PPSMC_DISPLAY_WATERMARK_HIGH;
2813 	}
2814 
2815 	pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
2816 	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
2817 		ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
2818 
2819 	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2820 				   (u8 *)levels, level_array_size,
2821 				   pi->sram_end);
2822 	if (ret)
2823 		return ret;
2824 
2825 	return 0;
2826 }
2827 
2828 static int ci_populate_ulv_state(struct radeon_device *rdev,
2829 				 SMU7_Discrete_Ulv *ulv_level)
2830 {
2831 	return ci_populate_ulv_level(rdev, ulv_level);
2832 }
2833 
2834 static int ci_populate_all_memory_levels(struct radeon_device *rdev)
2835 {
2836 	struct ci_power_info *pi = ci_get_pi(rdev);
2837 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
2838 	u32 level_array_address = pi->dpm_table_start +
2839 		offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
2840 	u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
2841 		SMU7_MAX_LEVELS_MEMORY;
2842 	SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
	u32 i;
	int ret;
2844 
2845 	memset(levels, 0, level_array_size);
2846 
2847 	for (i = 0; i < dpm_table->mclk_table.count; i++) {
2848 		if (dpm_table->mclk_table.dpm_levels[i].value == 0)
2849 			return -EINVAL;
2850 		ret = ci_populate_single_memory_level(rdev,
2851 						      dpm_table->mclk_table.dpm_levels[i].value,
2852 						      &pi->smc_state_table.MemoryLevel[i]);
2853 		if (ret)
2854 			return ret;
2855 	}
2856 
2857 	pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
2858 
2859 	pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
2860 	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
2861 		ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
2862 
2863 	pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
2864 		PPSMC_DISPLAY_WATERMARK_HIGH;
2865 
2866 	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2867 				   (u8 *)levels, level_array_size,
2868 				   pi->sram_end);
2869 	if (ret)
2870 		return ret;
2871 
2872 	return 0;
2873 }
2874 
2875 static void ci_reset_single_dpm_table(struct radeon_device *rdev,
2876 				      struct ci_single_dpm_table* dpm_table,
2877 				      u32 count)
2878 {
2879 	u32 i;
2880 
2881 	dpm_table->count = count;
2882 	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
2883 		dpm_table->dpm_levels[i].enabled = false;
2884 }
2885 
2886 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
2887 				      u32 index, u32 pcie_gen, u32 pcie_lanes)
2888 {
2889 	dpm_table->dpm_levels[index].value = pcie_gen;
2890 	dpm_table->dpm_levels[index].param1 = pcie_lanes;
2891 	dpm_table->dpm_levels[index].enabled = true;
2892 }
2893 
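/*
 * Build the six default PCIE DPM entries, ordered roughly from lowest
 * to highest power: powersaving min gen/lanes up through performance
 * max gen/lanes.  If only one of the performance/powersaving ranges is
 * valid, it is mirrored into the other.
 */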
2894 static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
2895 {
2896 	struct ci_power_info *pi = ci_get_pi(rdev);
2897 
2898 	if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
2899 		return -EINVAL;
2900 
2901 	if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
2902 		pi->pcie_gen_powersaving = pi->pcie_gen_performance;
2903 		pi->pcie_lane_powersaving = pi->pcie_lane_performance;
2904 	} else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
2905 		pi->pcie_gen_performance = pi->pcie_gen_powersaving;
2906 		pi->pcie_lane_performance = pi->pcie_lane_powersaving;
2907 	}
2908 
2909 	ci_reset_single_dpm_table(rdev,
2910 				  &pi->dpm_table.pcie_speed_table,
2911 				  SMU7_MAX_LEVELS_LINK);
2912 
2913 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
2914 				  pi->pcie_gen_powersaving.min,
2915 				  pi->pcie_lane_powersaving.min);
2916 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
2917 				  pi->pcie_gen_performance.min,
2918 				  pi->pcie_lane_performance.min);
2919 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
2920 				  pi->pcie_gen_powersaving.min,
2921 				  pi->pcie_lane_powersaving.max);
2922 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
2923 				  pi->pcie_gen_performance.min,
2924 				  pi->pcie_lane_performance.max);
2925 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
2926 				  pi->pcie_gen_powersaving.max,
2927 				  pi->pcie_lane_powersaving.max);
2928 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
2929 				  pi->pcie_gen_performance.max,
2930 				  pi->pcie_lane_performance.max);
2931 
2932 	pi->dpm_table.pcie_speed_table.count = 6;
2933 
2934 	return 0;
2935 }
2936 
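/*
 * Derive the default sclk/mclk/voltage DPM tables from the VBIOS
 * dependency tables, de-duplicating consecutive identical clocks, then
 * append the default PCIE table.
 */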
2937 static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
2938 {
2939 	struct ci_power_info *pi = ci_get_pi(rdev);
2940 	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
2941 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2942 	struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
2943 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
2944 	struct radeon_cac_leakage_table *std_voltage_table =
2945 		&rdev->pm.dpm.dyn_state.cac_leakage_table;
2946 	u32 i;
2947 
2948 	if (allowed_sclk_vddc_table == NULL)
2949 		return -EINVAL;
2950 	if (allowed_sclk_vddc_table->count < 1)
2951 		return -EINVAL;
2952 	if (allowed_mclk_table == NULL)
2953 		return -EINVAL;
2954 	if (allowed_mclk_table->count < 1)
2955 		return -EINVAL;
2956 
2957 	memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
2958 
2959 	ci_reset_single_dpm_table(rdev,
2960 				  &pi->dpm_table.sclk_table,
2961 				  SMU7_MAX_LEVELS_GRAPHICS);
2962 	ci_reset_single_dpm_table(rdev,
2963 				  &pi->dpm_table.mclk_table,
2964 				  SMU7_MAX_LEVELS_MEMORY);
2965 	ci_reset_single_dpm_table(rdev,
2966 				  &pi->dpm_table.vddc_table,
2967 				  SMU7_MAX_LEVELS_VDDC);
2968 	ci_reset_single_dpm_table(rdev,
2969 				  &pi->dpm_table.vddci_table,
2970 				  SMU7_MAX_LEVELS_VDDCI);
2971 	ci_reset_single_dpm_table(rdev,
2972 				  &pi->dpm_table.mvdd_table,
2973 				  SMU7_MAX_LEVELS_MVDD);
2974 
2975 	pi->dpm_table.sclk_table.count = 0;
2976 	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
2977 		if ((i == 0) ||
2978 		    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
2979 		     allowed_sclk_vddc_table->entries[i].clk)) {
2980 			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
2981 				allowed_sclk_vddc_table->entries[i].clk;
2982 			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true;
2983 			pi->dpm_table.sclk_table.count++;
2984 		}
2985 	}
2986 
2987 	pi->dpm_table.mclk_table.count = 0;
2988 	for (i = 0; i < allowed_mclk_table->count; i++) {
		if ((i == 0) ||
2990 		    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
2991 		     allowed_mclk_table->entries[i].clk)) {
2992 			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
2993 				allowed_mclk_table->entries[i].clk;
2994 			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true;
2995 			pi->dpm_table.mclk_table.count++;
2996 		}
2997 	}
2998 
2999 	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3000 		pi->dpm_table.vddc_table.dpm_levels[i].value =
3001 			allowed_sclk_vddc_table->entries[i].v;
3002 		pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3003 			std_voltage_table->entries[i].leakage;
3004 		pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3005 	}
3006 	pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3007 
3008 	allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
	/* The dyn_state tables are embedded members, so the pointer is
	 * never NULL; check for entries instead.
	 */
	if (allowed_mclk_table->count) {
3010 		for (i = 0; i < allowed_mclk_table->count; i++) {
3011 			pi->dpm_table.vddci_table.dpm_levels[i].value =
3012 				allowed_mclk_table->entries[i].v;
3013 			pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3014 		}
3015 		pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3016 	}
3017 
3018 	allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
	if (allowed_mclk_table->count) {
3020 		for (i = 0; i < allowed_mclk_table->count; i++) {
3021 			pi->dpm_table.mvdd_table.dpm_levels[i].value =
3022 				allowed_mclk_table->entries[i].v;
3023 			pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3024 		}
3025 		pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3026 	}
3027 
3028 	ci_setup_default_pcie_tables(rdev);
3029 
3030 	return 0;
3031 }
3032 
3033 static int ci_find_boot_level(struct ci_single_dpm_table *table,
3034 			      u32 value, u32 *boot_level)
3035 {
3036 	u32 i;
3037 	int ret = -EINVAL;
3038 
	for (i = 0; i < table->count; i++) {
3040 		if (value == table->dpm_levels[i].value) {
3041 			*boot_level = i;
3042 			ret = 0;
3043 		}
3044 	}
3045 
3046 	return ret;
3047 }
3048 
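/*
 * Build the complete SMC DPM table (graphics, memory, link, ACPI, UVD,
 * VCE, ACP, SAMU and ULV levels plus the global flags, boot levels and
 * thermal limits) and upload everything after the leading PID
 * controller section to SMC RAM.
 */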
3049 static int ci_init_smc_table(struct radeon_device *rdev)
3050 {
3051 	struct ci_power_info *pi = ci_get_pi(rdev);
3052 	struct ci_ulv_parm *ulv = &pi->ulv;
3053 	struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
3054 	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3055 	int ret;
3056 
3057 	ret = ci_setup_default_dpm_tables(rdev);
3058 	if (ret)
3059 		return ret;
3060 
3061 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3062 		ci_populate_smc_voltage_tables(rdev, table);
3063 
3064 	ci_init_fps_limits(rdev);
3065 
3066 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3067 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3068 
3069 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3070 		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3071 
3072 	if (pi->mem_gddr5)
3073 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3074 
3075 	if (ulv->supported) {
3076 		ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
3077 		if (ret)
3078 			return ret;
3079 		WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3080 	}
3081 
3082 	ret = ci_populate_all_graphic_levels(rdev);
3083 	if (ret)
3084 		return ret;
3085 
3086 	ret = ci_populate_all_memory_levels(rdev);
3087 	if (ret)
3088 		return ret;
3089 
3090 	ci_populate_smc_link_level(rdev, table);
3091 
3092 	ret = ci_populate_smc_acpi_level(rdev, table);
3093 	if (ret)
3094 		return ret;
3095 
3096 	ret = ci_populate_smc_vce_level(rdev, table);
3097 	if (ret)
3098 		return ret;
3099 
3100 	ret = ci_populate_smc_acp_level(rdev, table);
3101 	if (ret)
3102 		return ret;
3103 
3104 	ret = ci_populate_smc_samu_level(rdev, table);
3105 	if (ret)
3106 		return ret;
3107 
3108 	ret = ci_do_program_memory_timing_parameters(rdev);
3109 	if (ret)
3110 		return ret;
3111 
3112 	ret = ci_populate_smc_uvd_level(rdev, table);
3113 	if (ret)
3114 		return ret;
3115 
3116 	table->UvdBootLevel  = 0;
3117 	table->VceBootLevel  = 0;
3118 	table->AcpBootLevel  = 0;
3119 	table->SamuBootLevel  = 0;
3120 	table->GraphicsBootLevel  = 0;
3121 	table->MemoryBootLevel  = 0;
3122 
	/* Lookup failures are ignored; the boot levels default to 0 above. */
	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
				 pi->vbios_boot_state.sclk_bootup_value,
				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);

	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
				 pi->vbios_boot_state.mclk_bootup_value,
				 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3130 
3131 	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3132 	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3133 	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3134 
3135 	ci_populate_smc_initial_state(rdev, radeon_boot_state);
3136 
3137 	ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
3138 	if (ret)
3139 		return ret;
3140 
3141 	table->UVDInterval = 1;
3142 	table->VCEInterval = 1;
3143 	table->ACPInterval = 1;
3144 	table->SAMUInterval = 1;
3145 	table->GraphicsVoltageChangeEnable = 1;
3146 	table->GraphicsThermThrottleEnable = 1;
3147 	table->GraphicsInterval = 1;
3148 	table->VoltageInterval = 1;
3149 	table->ThermalInterval = 1;
3150 	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3151 					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3152 	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3153 					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3154 	table->MemoryVoltageChangeEnable = 1;
3155 	table->MemoryInterval = 1;
3156 	table->VoltageResponseTime = 0;
3157 	table->VddcVddciDelta = 4000;
3158 	table->PhaseResponseTime = 0;
3159 	table->MemoryThermThrottleEnable = 1;
3160 	table->PCIeBootLinkLevel = 0;
3161 	table->PCIeGenInterval = 1;
3162 	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3163 		table->SVI2Enable  = 1;
3164 	else
3165 		table->SVI2Enable  = 0;
3166 
3167 	table->ThermGpio = 17;
3168 	table->SclkStepSize = 0x4000;
3169 
3170 	table->SystemFlags = cpu_to_be32(table->SystemFlags);
3171 	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3172 	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3173 	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3174 	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3175 	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3176 	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3177 	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3178 	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3179 	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3180 	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3181 	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3182 	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3183 	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3184 
3185 	ret = ci_copy_bytes_to_smc(rdev,
3186 				   pi->dpm_table_start +
3187 				   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3188 				   (u8 *)&table->SystemFlags,
3189 				   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3190 				   pi->sram_end);
3191 	if (ret)
3192 		return ret;
3193 
3194 	return 0;
3195 }
3196 
3197 static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3198 				      struct ci_single_dpm_table *dpm_table,
3199 				      u32 low_limit, u32 high_limit)
3200 {
3201 	u32 i;
3202 
3203 	for (i = 0; i < dpm_table->count; i++) {
3204 		if ((dpm_table->dpm_levels[i].value < low_limit) ||
3205 		    (dpm_table->dpm_levels[i].value > high_limit))
3206 			dpm_table->dpm_levels[i].enabled = false;
3207 		else
3208 			dpm_table->dpm_levels[i].enabled = true;
3209 	}
3210 }
3211 
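/*
 * Disable PCIE levels outside the requested speed/lane window, then
 * disable any remaining duplicates so that each enabled gen/lane pair
 * appears only once.
 */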
3212 static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3213 				    u32 speed_low, u32 lanes_low,
3214 				    u32 speed_high, u32 lanes_high)
3215 {
3216 	struct ci_power_info *pi = ci_get_pi(rdev);
3217 	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3218 	u32 i, j;
3219 
3220 	for (i = 0; i < pcie_table->count; i++) {
3221 		if ((pcie_table->dpm_levels[i].value < speed_low) ||
3222 		    (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3223 		    (pcie_table->dpm_levels[i].value > speed_high) ||
3224 		    (pcie_table->dpm_levels[i].param1 > lanes_high))
3225 			pcie_table->dpm_levels[i].enabled = false;
3226 		else
3227 			pcie_table->dpm_levels[i].enabled = true;
3228 	}
3229 
3230 	for (i = 0; i < pcie_table->count; i++) {
3231 		if (pcie_table->dpm_levels[i].enabled) {
3232 			for (j = i + 1; j < pcie_table->count; j++) {
3233 				if (pcie_table->dpm_levels[j].enabled) {
3234 					if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3235 					    (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3236 						pcie_table->dpm_levels[j].enabled = false;
3237 				}
3238 			}
3239 		}
3240 	}
3241 }
3242 
3243 static int ci_trim_dpm_states(struct radeon_device *rdev,
3244 			      struct radeon_ps *radeon_state)
3245 {
3246 	struct ci_ps *state = ci_get_ps(radeon_state);
3247 	struct ci_power_info *pi = ci_get_pi(rdev);
3248 	u32 high_limit_count;
3249 
3250 	if (state->performance_level_count < 1)
3251 		return -EINVAL;
3252 
3253 	if (state->performance_level_count == 1)
3254 		high_limit_count = 0;
3255 	else
3256 		high_limit_count = 1;
3257 
3258 	ci_trim_single_dpm_states(rdev,
3259 				  &pi->dpm_table.sclk_table,
3260 				  state->performance_levels[0].sclk,
3261 				  state->performance_levels[high_limit_count].sclk);
3262 
3263 	ci_trim_single_dpm_states(rdev,
3264 				  &pi->dpm_table.mclk_table,
3265 				  state->performance_levels[0].mclk,
3266 				  state->performance_levels[high_limit_count].mclk);
3267 
3268 	ci_trim_pcie_dpm_states(rdev,
3269 				state->performance_levels[0].pcie_gen,
3270 				state->performance_levels[0].pcie_lane,
3271 				state->performance_levels[high_limit_count].pcie_gen,
3272 				state->performance_levels[high_limit_count].pcie_lane);
3273 
3274 	return 0;
3275 }
3276 
3277 static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
3278 {
3279 	struct radeon_clock_voltage_dependency_table *disp_voltage_table =
3280 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3281 	struct radeon_clock_voltage_dependency_table *vddc_table =
3282 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3283 	u32 requested_voltage = 0;
3284 	u32 i;
3285 
3286 	if (disp_voltage_table == NULL)
3287 		return -EINVAL;
3288 	if (!disp_voltage_table->count)
3289 		return -EINVAL;
3290 
3291 	for (i = 0; i < disp_voltage_table->count; i++) {
3292 		if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3293 			requested_voltage = disp_voltage_table->entries[i].v;
3294 	}
3295 
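	/* Round the display's requirement up to the nearest supported VDDC
	 * level and report that minimum to the SMC.
	 */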
3296 	for (i = 0; i < vddc_table->count; i++) {
3297 		if (requested_voltage <= vddc_table->entries[i].v) {
3298 			requested_voltage = vddc_table->entries[i].v;
3299 			return (ci_send_msg_to_smc_with_parameter(rdev,
3300 								  PPSMC_MSG_VddC_Request,
3301 								  requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3302 				0 : -EINVAL;
3303 		}
3304 	}
3305 
3306 	return -EINVAL;
3307 }
3308 
3309 static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3310 {
3311 	struct ci_power_info *pi = ci_get_pi(rdev);
3312 	PPSMC_Result result;
3313 
3314 	if (!pi->sclk_dpm_key_disabled) {
3315 		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3316 			result = ci_send_msg_to_smc_with_parameter(rdev,
3317 								   PPSMC_MSG_SCLKDPM_SetEnabledMask,
3318 								   pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3319 			if (result != PPSMC_Result_OK)
3320 				return -EINVAL;
3321 		}
3322 	}
3323 
3324 	if (!pi->mclk_dpm_key_disabled) {
3325 		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3326 			result = ci_send_msg_to_smc_with_parameter(rdev,
3327 								   PPSMC_MSG_MCLKDPM_SetEnabledMask,
3328 								   pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3329 			if (result != PPSMC_Result_OK)
3330 				return -EINVAL;
3331 		}
3332 	}
3333 
3334 	if (!pi->pcie_dpm_key_disabled) {
3335 		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3336 			result = ci_send_msg_to_smc_with_parameter(rdev,
3337 								   PPSMC_MSG_PCIeDPM_SetEnabledMask,
3338 								   pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3339 			if (result != PPSMC_Result_OK)
3340 				return -EINVAL;
3341 		}
3342 	}
3343 
3344 	ci_apply_disp_minimum_voltage_request(rdev);
3345 
3346 	return 0;
3347 }
3348 
3349 static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
3350 						   struct radeon_ps *radeon_state)
3351 {
3352 	struct ci_power_info *pi = ci_get_pi(rdev);
3353 	struct ci_ps *state = ci_get_ps(radeon_state);
3354 	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3355 	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3356 	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3357 	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3358 	u32 i;
3359 
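	/*
	 * DPMTABLE_OD_UPDATE_* flags a clock that is missing from the DPM
	 * table entirely (e.g. overdrive) and requires patching the table;
	 * DPMTABLE_UPDATE_* only asks for the existing levels to be
	 * re-populated and re-uploaded.
	 */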
3360 	pi->need_update_smu7_dpm_table = 0;
3361 
3362 	for (i = 0; i < sclk_table->count; i++) {
3363 		if (sclk == sclk_table->dpm_levels[i].value)
3364 			break;
3365 	}
3366 
3367 	if (i >= sclk_table->count) {
3368 		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3369 	} else {
3370 		/* XXX check display min clock requirements */
3371 		if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
3372 			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3373 	}
3374 
3375 	for (i = 0; i < mclk_table->count; i++) {
3376 		if (mclk == mclk_table->dpm_levels[i].value)
3377 			break;
3378 	}
3379 
3380 	if (i >= mclk_table->count)
3381 		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3382 
3383 	if (rdev->pm.dpm.current_active_crtc_count !=
3384 	    rdev->pm.dpm.new_active_crtc_count)
3385 		pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3386 }
3387 
3388 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
3389 						       struct radeon_ps *radeon_state)
3390 {
3391 	struct ci_power_info *pi = ci_get_pi(rdev);
3392 	struct ci_ps *state = ci_get_ps(radeon_state);
3393 	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3394 	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3395 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
3396 	int ret;
3397 
3398 	if (!pi->need_update_smu7_dpm_table)
3399 		return 0;
3400 
3401 	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
3402 		dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
3403 
3404 	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
3405 		dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
3406 
3407 	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
3408 		ret = ci_populate_all_graphic_levels(rdev);
3409 		if (ret)
3410 			return ret;
3411 	}
3412 
3413 	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3414 		ret = ci_populate_all_memory_levels(rdev);
3415 		if (ret)
3416 			return ret;
3417 	}
3418 
3419 	return 0;
3420 }
3421 
3422 static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
3423 {
3424 	struct ci_power_info *pi = ci_get_pi(rdev);
3425 	const struct radeon_clock_and_voltage_limits *max_limits;
3426 	int i;
3427 
3428 	if (rdev->pm.dpm.ac_power)
3429 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3430 	else
3431 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3432 
3433 	if (enable) {
3434 		pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
3435 
3436 		for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3437 			if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3438 				pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
3439 
3440 				if (!pi->caps_uvd_dpm)
3441 					break;
3442 			}
3443 		}
3444 
3445 		ci_send_msg_to_smc_with_parameter(rdev,
3446 						  PPSMC_MSG_UVDDPM_SetEnabledMask,
3447 						  pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
3448 
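		/*
		 * While UVD is active, keep the lowest MCLK level masked
		 * off so the SMC will not switch memory clocks mid-decode;
		 * the bit is restored below when UVD DPM is disabled again.
		 */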
3449 		if (pi->last_mclk_dpm_enable_mask & 0x1) {
3450 			pi->uvd_enabled = true;
3451 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3452 			ci_send_msg_to_smc_with_parameter(rdev,
3453 							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
3454 							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3455 		}
3456 	} else {
3457 		if (pi->last_mclk_dpm_enable_mask & 0x1) {
3458 			pi->uvd_enabled = false;
3459 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
3460 			ci_send_msg_to_smc_with_parameter(rdev,
3461 							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
3462 							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3463 		}
3464 	}
3465 
3466 	return (ci_send_msg_to_smc(rdev, enable ?
3467 				   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
3468 		0 : -EINVAL;
3469 }
3470 
3471 #if 0
3472 static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
3473 {
3474 	struct ci_power_info *pi = ci_get_pi(rdev);
3475 	const struct radeon_clock_and_voltage_limits *max_limits;
3476 	int i;
3477 
3478 	if (rdev->pm.dpm.ac_power)
3479 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3480 	else
3481 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3482 
3483 	if (enable) {
3484 		pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
3485 		for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3486 			if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3487 				pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
3488 
3489 				if (!pi->caps_vce_dpm)
3490 					break;
3491 			}
3492 		}
3493 
3494 		ci_send_msg_to_smc_with_parameter(rdev,
3495 						  PPSMC_MSG_VCEDPM_SetEnabledMask,
3496 						  pi->dpm_level_enable_mask.vce_dpm_enable_mask);
3497 	}
3498 
3499 	return (ci_send_msg_to_smc(rdev, enable ?
3500 				   PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
3501 		0 : -EINVAL;
3502 }
3503 
3504 static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
3505 {
3506 	struct ci_power_info *pi = ci_get_pi(rdev);
3507 	const struct radeon_clock_and_voltage_limits *max_limits;
3508 	int i;
3509 
3510 	if (rdev->pm.dpm.ac_power)
3511 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3512 	else
3513 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3514 
3515 	if (enable) {
3516 		pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
3517 		for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3518 			if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3519 				pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
3520 
3521 				if (!pi->caps_samu_dpm)
3522 					break;
3523 			}
3524 		}
3525 
3526 		ci_send_msg_to_smc_with_parameter(rdev,
3527 						  PPSMC_MSG_SAMUDPM_SetEnabledMask,
3528 						  pi->dpm_level_enable_mask.samu_dpm_enable_mask);
3529 	}
3530 	return (ci_send_msg_to_smc(rdev, enable ?
3531 				   PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
3532 		0 : -EINVAL;
3533 }
3534 
3535 static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
3536 {
3537 	struct ci_power_info *pi = ci_get_pi(rdev);
3538 	const struct radeon_clock_and_voltage_limits *max_limits;
3539 	int i;
3540 
3541 	if (rdev->pm.dpm.ac_power)
3542 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3543 	else
3544 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3545 
3546 	if (enable) {
3547 		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
3548 		for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3549 			if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3550 				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
3551 
3552 				if (!pi->caps_acp_dpm)
3553 					break;
3554 			}
3555 		}
3556 
3557 		ci_send_msg_to_smc_with_parameter(rdev,
3558 						  PPSMC_MSG_ACPDPM_SetEnabledMask,
3559 						  pi->dpm_level_enable_mask.acp_dpm_enable_mask);
3560 	}
3561 
3562 	return (ci_send_msg_to_smc(rdev, enable ?
3563 				   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
3564 		0 : -EINVAL;
3565 }
3566 #endif
3567 
3568 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
3569 {
3570 	struct ci_power_info *pi = ci_get_pi(rdev);
3571 	u32 tmp;
3572 
3573 	if (!gate) {
3574 		if (pi->caps_uvd_dpm ||
3575 		    (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
3576 			pi->smc_state_table.UvdBootLevel = 0;
3577 		else
3578 			pi->smc_state_table.UvdBootLevel =
3579 				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
3580 
3581 		tmp = RREG32_SMC(DPM_TABLE_475);
3582 		tmp &= ~UvdBootLevel_MASK;
3583 		tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
3584 		WREG32_SMC(DPM_TABLE_475, tmp);
3585 	}
3586 
3587 	return ci_enable_uvd_dpm(rdev, !gate);
3588 }
3589 
3590 #if 0
3591 static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
3592 {
3593 	u8 i;
3594 	u32 min_evclk = 30000; /* ??? */
3595 	struct radeon_vce_clock_voltage_dependency_table *table =
3596 		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
3597 
3598 	for (i = 0; i < table->count; i++) {
3599 		if (table->entries[i].evclk >= min_evclk)
3600 			return i;
3601 	}
3602 
3603 	return table->count - 1;
3604 }
3605 
3606 static int ci_update_vce_dpm(struct radeon_device *rdev,
3607 			     struct radeon_ps *radeon_new_state,
3608 			     struct radeon_ps *radeon_current_state)
3609 {
3610 	struct ci_power_info *pi = ci_get_pi(rdev);
3611 	bool new_vce_clock_non_zero = (radeon_new_state->evclk != 0);
3612 	bool old_vce_clock_non_zero = (radeon_current_state->evclk != 0);
3613 	int ret = 0;
3614 	u32 tmp;
3615 
3616 	if (new_vce_clock_non_zero != old_vce_clock_non_zero) {
3617 		if (new_vce_clock_non_zero) {
3618 			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
3619 
3620 			tmp = RREG32_SMC(DPM_TABLE_475);
3621 			tmp &= ~VceBootLevel_MASK;
3622 			tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
3623 			WREG32_SMC(DPM_TABLE_475, tmp);
3624 
3625 			ret = ci_enable_vce_dpm(rdev, true);
3626 		} else {
3627 			ret = ci_enable_vce_dpm(rdev, false);
3628 		}
3629 	}
3630 	return ret;
3631 }
3632 
3633 static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
3634 {
3635 	return ci_enable_samu_dpm(rdev, gate);
3636 }
3637 
3638 static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
3639 {
3640 	struct ci_power_info *pi = ci_get_pi(rdev);
3641 	u32 tmp;
3642 
3643 	if (!gate) {
3644 		pi->smc_state_table.AcpBootLevel = 0;
3645 
3646 		tmp = RREG32_SMC(DPM_TABLE_475);
3647 		tmp &= ~AcpBootLevel_MASK;
3648 		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
3649 		WREG32_SMC(DPM_TABLE_475, tmp);
3650 	}
3651 
3652 	return ci_enable_acp_dpm(rdev, !gate);
3653 }
3654 #endif
3655 
3656 static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
3657 					     struct radeon_ps *radeon_state)
3658 {
3659 	struct ci_power_info *pi = ci_get_pi(rdev);
3660 	int ret;
3661 
3662 	ret = ci_trim_dpm_states(rdev, radeon_state);
3663 	if (ret)
3664 		return ret;
3665 
3666 	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3667 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
3668 	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3669 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
3670 	pi->last_mclk_dpm_enable_mask =
3671 		pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3672 	if (pi->uvd_enabled) {
3673 		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
3674 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3675 	}
3676 	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
3677 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
3678 
3679 	return 0;
3680 }
3681 
3682 static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
3683 				       u32 level_mask)
3684 {
3685 	u32 level = 0;
3686 
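	/* Effectively ffs(level_mask) - 1.  Callers only pass non-zero
	 * masks; a zero mask would make this loop spin forever.
	 */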
3687 	while ((level_mask & (1 << level)) == 0)
3688 		level++;
3689 
3690 	return level;
3691 }
3692 
3694 int ci_dpm_force_performance_level(struct radeon_device *rdev,
3695 				   enum radeon_dpm_forced_level level)
3696 {
3697 	struct ci_power_info *pi = ci_get_pi(rdev);
3698 	PPSMC_Result smc_result;
3699 	u32 tmp, levels, i;
3700 	int ret;
3701 
3702 	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
3703 		if ((!pi->sclk_dpm_key_disabled) &&
3704 		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
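			/* fls(mask) - 1: index of the highest enabled level */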
3705 			levels = 0;
3706 			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
3707 			while (tmp >>= 1)
3708 				levels++;
3709 			if (levels) {
3710 				ret = ci_dpm_force_state_sclk(rdev, levels);
3711 				if (ret)
3712 					return ret;
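				/* wait for the SMC to report the forced index */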
3713 				for (i = 0; i < rdev->usec_timeout; i++) {
3714 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3715 					       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3716 					if (tmp == levels)
3717 						break;
3718 					udelay(1);
3719 				}
3720 			}
3721 		}
3722 		if ((!pi->mclk_dpm_key_disabled) &&
3723 		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3724 			levels = 0;
3725 			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3726 			while (tmp >>= 1)
3727 				levels++;
3728 			if (levels) {
3729 				ret = ci_dpm_force_state_mclk(rdev, levels);
3730 				if (ret)
3731 					return ret;
3732 				for (i = 0; i < rdev->usec_timeout; i++) {
3733 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3734 					       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3735 					if (tmp == levels)
3736 						break;
3737 					udelay(1);
3738 				}
3739 			}
3740 		}
3741 		if ((!pi->pcie_dpm_key_disabled) &&
3742 		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3743 			levels = 0;
3744 			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
3745 			while (tmp >>= 1)
3746 				levels++;
3747 			if (levels) {
3748 				ret = ci_dpm_force_state_pcie(rdev, levels);
3749 				if (ret)
3750 					return ret;
3751 				for (i = 0; i < rdev->usec_timeout; i++) {
3752 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3753 					       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3754 					if (tmp == levels)
3755 						break;
3756 					udelay(1);
3757 				}
3758 			}
3759 		}
3760 	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
3761 		if ((!pi->sclk_dpm_key_disabled) &&
3762 		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3763 			levels = ci_get_lowest_enabled_level(rdev,
3764 							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3765 			ret = ci_dpm_force_state_sclk(rdev, levels);
3766 			if (ret)
3767 				return ret;
3768 			for (i = 0; i < rdev->usec_timeout; i++) {
3769 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3770 				       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3771 				if (tmp == levels)
3772 					break;
3773 				udelay(1);
3774 			}
3775 		}
3776 		if ((!pi->mclk_dpm_key_disabled) &&
3777 		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3778 			levels = ci_get_lowest_enabled_level(rdev,
3779 							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3780 			ret = ci_dpm_force_state_mclk(rdev, levels);
3781 			if (ret)
3782 				return ret;
3783 			for (i = 0; i < rdev->usec_timeout; i++) {
3784 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3785 				       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3786 				if (tmp == levels)
3787 					break;
3788 				udelay(1);
3789 			}
3790 		}
3791 		if ((!pi->pcie_dpm_key_disabled) &&
3792 		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3793 			levels = ci_get_lowest_enabled_level(rdev,
3794 							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3795 			ret = ci_dpm_force_state_pcie(rdev, levels);
3796 			if (ret)
3797 				return ret;
3798 			for (i = 0; i < rdev->usec_timeout; i++) {
3799 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3800 				       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3801 				if (tmp == levels)
3802 					break;
3803 				udelay(1);
3804 			}
3805 		}
3806 	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
3807 		if (!pi->sclk_dpm_key_disabled) {
3808 			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
3809 			if (smc_result != PPSMC_Result_OK)
3810 				return -EINVAL;
3811 		}
3812 		if (!pi->mclk_dpm_key_disabled) {
3813 			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
3814 			if (smc_result != PPSMC_Result_OK)
3815 				return -EINVAL;
3816 		}
3817 		if (!pi->pcie_dpm_key_disabled) {
3818 			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel);
3819 			if (smc_result != PPSMC_Result_OK)
3820 				return -EINVAL;
3821 		}
3822 	}
3823 
3824 	rdev->pm.dpm.forced_level = level;
3825 
3826 	return 0;
3827 }
3828 
3829 static int ci_set_mc_special_registers(struct radeon_device *rdev,
3830 				       struct ci_mc_reg_table *table)
3831 {
3832 	struct ci_power_info *pi = ci_get_pi(rdev);
3833 	u8 i, j, k;
3834 	u32 temp_reg;
3835 
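	/*
	 * Registers such as MC_SEQ_MISC1 and MC_SEQ_RESERVE_M need
	 * companion EMRS/MRS/MRS1 command registers appended to the table,
	 * with values derived from the original entry.  New entries are
	 * appended at index j, past the registers copied from the VBIOS.
	 */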
3836 	for (i = 0, j = table->last; i < table->last; i++) {
3837 		if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3838 			return -EINVAL;
3839 		switch (table->mc_reg_address[i].s1 << 2) {
3840 		case MC_SEQ_MISC1:
3841 			temp_reg = RREG32(MC_PMG_CMD_EMRS);
3842 			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
3843 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3844 			for (k = 0; k < table->num_entries; k++) {
3845 				table->mc_reg_table_entry[k].mc_data[j] =
3846 					((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
3847 			}
3848 			j++;
3849 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3850 				return -EINVAL;
3851 
3852 			temp_reg = RREG32(MC_PMG_CMD_MRS);
3853 			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
3854 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3855 			for (k = 0; k < table->num_entries; k++) {
3856 				table->mc_reg_table_entry[k].mc_data[j] =
3857 					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3858 				if (!pi->mem_gddr5)
3859 					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
3860 			}
3861 			j++;
3862 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3863 				return -EINVAL;
3864 
3865 			if (!pi->mem_gddr5) {
3866 				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
3867 				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
3868 				for (k = 0; k < table->num_entries; k++) {
3869 					table->mc_reg_table_entry[k].mc_data[j] =
3870 						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
3871 				}
3872 				j++;
3873 				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3874 					return -EINVAL;
3875 			}
3876 			break;
3877 		case MC_SEQ_RESERVE_M:
3878 			temp_reg = RREG32(MC_PMG_CMD_MRS1);
3879 			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
3880 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3881 			for (k = 0; k < table->num_entries; k++) {
3882 				table->mc_reg_table_entry[k].mc_data[j] =
3883 					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3884 			}
3885 			j++;
3886 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3887 				return -EINVAL;
3888 			break;
3889 		default:
3890 			break;
3891 		}
3892 
3893 	}
3894 
3895 	table->last = j;
3896 
3897 	return 0;
3898 }
3899 
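/*
 * Map an MC register (dword offset) to its _LP shadow; s0 of each table
 * entry points at the shadow that is actually reprogrammed during a
 * memory clock switch.  Returns false if the register has no shadow.
 */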
3900 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
3901 {
3902 	bool result = true;
3903 
3904 	switch (in_reg) {
3905 	case MC_SEQ_RAS_TIMING >> 2:
3906 		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
3907 		break;
3908 	case MC_SEQ_DLL_STBY >> 2:
3909 		*out_reg = MC_SEQ_DLL_STBY_LP >> 2;
3910 		break;
3911 	case MC_SEQ_G5PDX_CMD0 >> 2:
3912 		*out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
3913 		break;
3914 	case MC_SEQ_G5PDX_CMD1 >> 2:
3915 		*out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
3916 		break;
3917 	case MC_SEQ_G5PDX_CTRL >> 2:
3918 		*out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
3919 		break;
3920 	case MC_SEQ_CAS_TIMING >> 2:
3921 		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
3922 		break;
3923 	case MC_SEQ_MISC_TIMING >> 2:
3924 		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
3925 		break;
3926 	case MC_SEQ_MISC_TIMING2 >> 2:
3927 		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
3928 		break;
3929 	case MC_SEQ_PMG_DVS_CMD >> 2:
3930 		*out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
3931 		break;
3932 	case MC_SEQ_PMG_DVS_CTL >> 2:
3933 		*out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
3934 		break;
3935 	case MC_SEQ_RD_CTL_D0 >> 2:
3936 		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
3937 		break;
3938 	case MC_SEQ_RD_CTL_D1 >> 2:
3939 		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
3940 		break;
3941 	case MC_SEQ_WR_CTL_D0 >> 2:
3942 		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
3943 		break;
3944 	case MC_SEQ_WR_CTL_D1 >> 2:
3945 		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
3946 		break;
3947 	case MC_PMG_CMD_EMRS >> 2:
3948 		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3949 		break;
3950 	case MC_PMG_CMD_MRS >> 2:
3951 		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3952 		break;
3953 	case MC_PMG_CMD_MRS1 >> 2:
3954 		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3955 		break;
3956 	case MC_SEQ_PMG_TIMING >> 2:
3957 		*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
3958 		break;
3959 	case MC_PMG_CMD_MRS2 >> 2:
3960 		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
3961 		break;
3962 	case MC_SEQ_WR_CTL_2 >> 2:
3963 		*out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
3964 		break;
3965 	default:
3966 		result = false;
3967 		break;
3968 	}
3969 
3970 	return result;
3971 }
3972 
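/*
 * A register only needs per-MCLK-level programming if its value differs
 * between at least two adjacent entries; registers that stay constant
 * are excluded from the SMC table via the valid_flag bitmask.
 */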
3973 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
3974 {
3975 	u8 i, j;
3976 
3977 	for (i = 0; i < table->last; i++) {
3978 		for (j = 1; j < table->num_entries; j++) {
3979 			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
3980 			    table->mc_reg_table_entry[j].mc_data[i]) {
3981 				table->valid_flag |= 1 << i;
3982 				break;
3983 			}
3984 		}
3985 	}
3986 }
3987 
3988 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
3989 {
3990 	u32 i;
3991 	u16 address;
3992 
3993 	for (i = 0; i < table->last; i++) {
3994 		table->mc_reg_address[i].s0 =
3995 			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
3996 			address : table->mc_reg_address[i].s1;
3997 	}
3998 }
3999 
4000 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4001 				      struct ci_mc_reg_table *ci_table)
4002 {
4003 	u8 i, j;
4004 
4005 	if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4006 		return -EINVAL;
4007 	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4008 		return -EINVAL;
4009 
4010 	for (i = 0; i < table->last; i++)
4011 		ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4012 
4013 	ci_table->last = table->last;
4014 
4015 	for (i = 0; i < table->num_entries; i++) {
4016 		ci_table->mc_reg_table_entry[i].mclk_max =
4017 			table->mc_reg_table_entry[i].mclk_max;
4018 		for (j = 0; j < table->last; j++)
4019 			ci_table->mc_reg_table_entry[i].mc_data[j] =
4020 				table->mc_reg_table_entry[i].mc_data[j];
4021 	}
4022 	ci_table->num_entries = table->num_entries;
4023 
4024 	return 0;
4025 }
4026 
4027 static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
4028 {
4029 	struct ci_power_info *pi = ci_get_pi(rdev);
4030 	struct atom_mc_reg_table *table;
4031 	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4032 	u8 module_index = rv770_get_memory_module_index(rdev);
4033 	int ret;
4034 
4035 	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4036 	if (!table)
4037 		return -ENOMEM;
4038 
4039 	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
4040 	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
4041 	WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
4042 	WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
4043 	WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
4044 	WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
4045 	WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
4046 	WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
4047 	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
4048 	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
4049 	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
4050 	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
4051 	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
4052 	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
4053 	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
4054 	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
4055 	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
4056 	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
4057 	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
4058 	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
4059 
4060 	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
4061 	if (ret)
4062 		goto init_mc_done;
4063 
4064 	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4065 	if (ret)
4066 		goto init_mc_done;
4067 
4068 	ci_set_s0_mc_reg_index(ci_table);
4069 
4070 	ret = ci_set_mc_special_registers(rdev, ci_table);
4071 	if (ret)
4072 		goto init_mc_done;
4073 
4074 	ci_set_valid_flag(ci_table);
4075 
4076 init_mc_done:
4077 	kfree(table);
4078 
4079 	return ret;
4080 }
4081 
4082 static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
4083 					SMU7_Discrete_MCRegisters *mc_reg_table)
4084 {
4085 	struct ci_power_info *pi = ci_get_pi(rdev);
4086 	u32 i, j;
4087 
4088 	for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4089 		if (pi->mc_reg_table.valid_flag & (1 << j)) {
4090 			if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4091 				return -EINVAL;
4092 			mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4093 			mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4094 			i++;
4095 		}
4096 	}
4097 
4098 	mc_reg_table->last = (u8)i;
4099 
4100 	return 0;
4101 }
4102 
4103 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4104 				    SMU7_Discrete_MCRegisterSet *data,
4105 				    u32 num_entries, u32 valid_flag)
4106 {
4107 	u32 i, j;
4108 
4109 	for (i = 0, j = 0; j < num_entries; j++) {
4110 		if (valid_flag & (1 << j)) {
4111 			data->value[i] = cpu_to_be32(entry->mc_data[j]);
4112 			i++;
4113 		}
4114 	}
4115 }
4116 
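/*
 * Select the first VBIOS MC register entry whose mclk_max covers the
 * requested memory clock, falling back to the last entry when the clock
 * exceeds every range.
 */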
4117 static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
4118 						 const u32 memory_clock,
4119 						 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4120 {
4121 	struct ci_power_info *pi = ci_get_pi(rdev);
4122 	u32 i = 0;
4123 
4124 	for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
4125 		if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4126 			break;
4127 	}
4128 
4129 	if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4130 		--i;
4131 
4132 	ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4133 				mc_reg_table_data, pi->mc_reg_table.last,
4134 				pi->mc_reg_table.valid_flag);
4135 }
4136 
4137 static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
4138 					   SMU7_Discrete_MCRegisters *mc_reg_table)
4139 {
4140 	struct ci_power_info *pi = ci_get_pi(rdev);
4141 	u32 i;
4142 
4143 	for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4144 		ci_convert_mc_reg_table_entry_to_smc(rdev,
4145 						     pi->dpm_table.mclk_table.dpm_levels[i].value,
4146 						     &mc_reg_table->data[i]);
4147 }
4148 
4149 static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
4150 {
4151 	struct ci_power_info *pi = ci_get_pi(rdev);
4152 	int ret;
4153 
4154 	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4155 
4156 	ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
4157 	if (ret)
4158 		return ret;
4159 	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4160 
4161 	return ci_copy_bytes_to_smc(rdev,
4162 				    pi->mc_reg_table_start,
4163 				    (u8 *)&pi->smc_mc_reg_table,
4164 				    sizeof(SMU7_Discrete_MCRegisters),
4165 				    pi->sram_end);
4166 }
4167 
4168 static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
4169 {
4170 	struct ci_power_info *pi = ci_get_pi(rdev);
4171 
4172 	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4173 		return 0;
4174 
4175 	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4176 
4177 	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4178 
4179 	return ci_copy_bytes_to_smc(rdev,
4180 				    pi->mc_reg_table_start +
4181 				    offsetof(SMU7_Discrete_MCRegisters, data[0]),
4182 				    (u8 *)&pi->smc_mc_reg_table.data[0],
4183 				    sizeof(SMU7_Discrete_MCRegisterSet) *
4184 				    pi->dpm_table.mclk_table.count,
4185 				    pi->sram_end);
4186 }
4187 
4188 static void ci_enable_voltage_control(struct radeon_device *rdev)
4189 {
4190 	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
4191 
4192 	tmp |= VOLT_PWRMGT_EN;
4193 	WREG32_SMC(GENERAL_PWRMGT, tmp);
4194 }
4195 
4196 static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
4197 						      struct radeon_ps *radeon_state)
4198 {
4199 	struct ci_ps *state = ci_get_ps(radeon_state);
4200 	int i;
4201 	u16 pcie_speed, max_speed = 0;
4202 
4203 	for (i = 0; i < state->performance_level_count; i++) {
4204 		pcie_speed = state->performance_levels[i].pcie_gen;
4205 		if (max_speed < pcie_speed)
4206 			max_speed = pcie_speed;
4207 	}
4208 
4209 	return max_speed;
4210 }
4211 
4212 static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
4213 {
4214 	u32 speed_cntl = 0;
4215 
4216 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
4217 	speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
4218 
4219 	return (u16)speed_cntl;
4220 }
4221 
4222 static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
4223 {
4224 	u32 link_width = 0;
4225 
4226 	link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
4227 	link_width >>= LC_LINK_WIDTH_RD_SHIFT;
4228 
4229 	switch (link_width) {
4230 	case RADEON_PCIE_LC_LINK_WIDTH_X1:
4231 		return 1;
4232 	case RADEON_PCIE_LC_LINK_WIDTH_X2:
4233 		return 2;
4234 	case RADEON_PCIE_LC_LINK_WIDTH_X4:
4235 		return 4;
4236 	case RADEON_PCIE_LC_LINK_WIDTH_X8:
4237 		return 8;
4238 	case RADEON_PCIE_LC_LINK_WIDTH_X12:
4239 		/* not actually supported */
4240 		return 12;
4241 	case RADEON_PCIE_LC_LINK_WIDTH_X0:
4242 	case RADEON_PCIE_LC_LINK_WIDTH_X16:
4243 	default:
4244 		return 16;
4245 	}
4246 }
4247 
4248 static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
4249 							     struct radeon_ps *radeon_new_state,
4250 							     struct radeon_ps *radeon_current_state)
4251 {
4252 	struct ci_power_info *pi = ci_get_pi(rdev);
4253 	enum radeon_pcie_gen target_link_speed =
4254 		ci_get_maximum_link_speed(rdev, radeon_new_state);
4255 	enum radeon_pcie_gen current_link_speed;
4256 
4257 	if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
4258 		current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
4259 	else
4260 		current_link_speed = pi->force_pcie_gen;
4261 
4262 	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
4263 	pi->pspp_notify_required = false;
4264 	if (target_link_speed > current_link_speed) {
4265 		switch (target_link_speed) {
4266 #ifdef CONFIG_ACPI
4267 		case RADEON_PCIE_GEN3:
4268 			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
4269 				break;
4270 			pi->force_pcie_gen = RADEON_PCIE_GEN2;
4271 			if (current_link_speed == RADEON_PCIE_GEN2)
4272 				break;
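			/* fall through: request gen2 instead */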
4273 		case RADEON_PCIE_GEN2:
4274 			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
4275 				break;
4276 #endif
4277 		default:
4278 			pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
4279 			break;
4280 		}
4281 	} else {
4282 		if (target_link_speed < current_link_speed)
4283 			pi->pspp_notify_required = true;
4284 	}
4285 }
4286 
4287 static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
4288 							   struct radeon_ps *radeon_new_state,
4289 							   struct radeon_ps *radeon_current_state)
4290 {
4291 	struct ci_power_info *pi = ci_get_pi(rdev);
4292 	enum radeon_pcie_gen target_link_speed =
4293 		ci_get_maximum_link_speed(rdev, radeon_new_state);
4294 	u8 request;
4295 
4296 	if (pi->pspp_notify_required) {
4297 		if (target_link_speed == RADEON_PCIE_GEN3)
4298 			request = PCIE_PERF_REQ_PECI_GEN3;
4299 		else if (target_link_speed == RADEON_PCIE_GEN2)
4300 			request = PCIE_PERF_REQ_PECI_GEN2;
4301 		else
4302 			request = PCIE_PERF_REQ_PECI_GEN1;
4303 
4304 		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
4305 		    (ci_get_current_pcie_speed(rdev) > 0))
4306 			return;
4307 
4308 #ifdef CONFIG_ACPI
4309 		radeon_acpi_pcie_performance_request(rdev, request, false);
4310 #endif
4311 	}
4312 }
4313 
4314 static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4315 {
4316 	struct ci_power_info *pi = ci_get_pi(rdev);
4317 	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4318 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4319 	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4320 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4321 	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4322 		&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4323 
4324 	if (allowed_sclk_vddc_table == NULL)
4325 		return -EINVAL;
4326 	if (allowed_sclk_vddc_table->count < 1)
4327 		return -EINVAL;
4328 	if (allowed_mclk_vddc_table == NULL)
4329 		return -EINVAL;
4330 	if (allowed_mclk_vddc_table->count < 1)
4331 		return -EINVAL;
4332 	if (allowed_mclk_vddci_table == NULL)
4333 		return -EINVAL;
4334 	if (allowed_mclk_vddci_table->count < 1)
4335 		return -EINVAL;
4336 
4337 	pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4338 	pi->max_vddc_in_pp_table =
4339 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4340 
4341 	pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4342 	pi->max_vddci_in_pp_table =
4343 		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4344 
4345 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4346 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4347 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
4348 		allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
4349 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4350 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4351 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4352 		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4353 
4354 	return 0;
4355 }
4356 
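/*
 * VBIOS tables may carry virtual "leakage" voltage IDs instead of real
 * voltages; replace a matching ID with the actual voltage recorded for
 * this board.
 */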
4357 static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
4358 {
4359 	struct ci_power_info *pi = ci_get_pi(rdev);
4360 	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
4361 	u32 leakage_index;
4362 
4363 	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4364 		if (leakage_table->leakage_id[leakage_index] == *vddc) {
4365 			*vddc = leakage_table->actual_voltage[leakage_index];
4366 			break;
4367 		}
4368 	}
4369 }
4370 
4371 static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
4372 {
4373 	struct ci_power_info *pi = ci_get_pi(rdev);
4374 	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
4375 	u32 leakage_index;
4376 
4377 	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4378 		if (leakage_table->leakage_id[leakage_index] == *vddci) {
4379 			*vddci = leakage_table->actual_voltage[leakage_index];
4380 			break;
4381 		}
4382 	}
4383 }
4384 
4385 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4386 								      struct radeon_clock_voltage_dependency_table *table)
4387 {
4388 	u32 i;
4389 
4390 	if (table) {
4391 		for (i = 0; i < table->count; i++)
4392 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4393 	}
4394 }
4395 
4396 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
4397 								       struct radeon_clock_voltage_dependency_table *table)
4398 {
4399 	u32 i;
4400 
4401 	if (table) {
4402 		for (i = 0; i < table->count; i++)
4403 			ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
4404 	}
4405 }
4406 
4407 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4408 									  struct radeon_vce_clock_voltage_dependency_table *table)
4409 {
4410 	u32 i;
4411 
4412 	if (table) {
4413 		for (i = 0; i < table->count; i++)
4414 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4415 	}
4416 }
4417 
4418 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4419 									  struct radeon_uvd_clock_voltage_dependency_table *table)
4420 {
4421 	u32 i;
4422 
4423 	if (table) {
4424 		for (i = 0; i < table->count; i++)
4425 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4426 	}
4427 }
4428 
4429 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
4430 								   struct radeon_phase_shedding_limits_table *table)
4431 {
4432 	u32 i;
4433 
4434 	if (table) {
4435 		for (i = 0; i < table->count; i++)
4436 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
4437 	}
4438 }
4439 
4440 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
4441 							    struct radeon_clock_and_voltage_limits *table)
4442 {
4443 	if (table) {
4444 		ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
4445 		ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
4446 	}
4447 }
4448 
4449 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
4450 							 struct radeon_cac_leakage_table *table)
4451 {
4452 	u32 i;
4453 
4454 	if (table) {
4455 		for (i = 0; i < table->count; i++)
4456 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
4457 	}
4458 }
4459 
4460 static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
4461 {
4463 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4464 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
4465 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4466 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
4467 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4468 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
4469 	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
4470 								   &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
4471 	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4472 								      &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
4473 	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4474 								      &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
4475 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4476 								  &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
4477 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4478 								  &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
4479 	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
4480 							       &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
4481 	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4482 							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
4483 	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4484 							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
4485 	ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
4486 						     &rdev->pm.dpm.dyn_state.cac_leakage_table);
4488 }
4489 
4490 static void ci_get_memory_type(struct radeon_device *rdev)
4491 {
4492 	struct ci_power_info *pi = ci_get_pi(rdev);
4493 	u32 tmp;
4494 
4495 	tmp = RREG32(MC_SEQ_MISC0);
4496 
4497 	if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
4498 	    MC_SEQ_MISC0_GDDR5_VALUE)
4499 		pi->mem_gddr5 = true;
4500 	else
4501 		pi->mem_gddr5 = false;
4503 }
4504 
4505 static void ci_update_current_ps(struct radeon_device *rdev,
4506 				 struct radeon_ps *rps)
4507 {
4508 	struct ci_ps *new_ps = ci_get_ps(rps);
4509 	struct ci_power_info *pi = ci_get_pi(rdev);
4510 
4511 	pi->current_rps = *rps;
4512 	pi->current_ps = *new_ps;
4513 	pi->current_rps.ps_priv = &pi->current_ps;
4514 }
4515 
4516 static void ci_update_requested_ps(struct radeon_device *rdev,
4517 				   struct radeon_ps *rps)
4518 {
4519 	struct ci_ps *new_ps = ci_get_ps(rps);
4520 	struct ci_power_info *pi = ci_get_pi(rdev);
4521 
4522 	pi->requested_rps = *rps;
4523 	pi->requested_ps = *new_ps;
4524 	pi->requested_rps.ps_priv = &pi->requested_ps;
4525 }
4526 
4527 int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
4528 {
4529 	struct ci_power_info *pi = ci_get_pi(rdev);
4530 	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
4531 	struct radeon_ps *new_ps = &requested_ps;
4532 
4533 	ci_update_requested_ps(rdev, new_ps);
4534 
4535 	ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
4536 
4537 	return 0;
4538 }
4539 
4540 void ci_dpm_post_set_power_state(struct radeon_device *rdev)
4541 {
4542 	struct ci_power_info *pi = ci_get_pi(rdev);
4543 	struct radeon_ps *new_ps = &pi->requested_rps;
4544 
4545 	ci_update_current_ps(rdev, new_ps);
4546 }
4547 
4549 void ci_dpm_setup_asic(struct radeon_device *rdev)
4550 {
4551 	int r;
4552 
4553 	r = ci_mc_load_microcode(rdev);
4554 	if (r)
4555 		DRM_ERROR("Failed to load MC firmware!\n");
4556 	ci_read_clock_registers(rdev);
4557 	ci_get_memory_type(rdev);
4558 	ci_enable_acpi_power_management(rdev);
4559 	ci_init_sclk_t(rdev);
4560 }
4561 
4562 int ci_dpm_enable(struct radeon_device *rdev)
4563 {
4564 	struct ci_power_info *pi = ci_get_pi(rdev);
4565 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4566 	int ret;
4567 
4568 	if (ci_is_smc_running(rdev))
4569 		return -EINVAL;
4570 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
4571 		ci_enable_voltage_control(rdev);
4572 		ret = ci_construct_voltage_tables(rdev);
4573 		if (ret) {
4574 			DRM_ERROR("ci_construct_voltage_tables failed\n");
4575 			return ret;
4576 		}
4577 	}
4578 	if (pi->caps_dynamic_ac_timing) {
4579 		ret = ci_initialize_mc_reg_table(rdev);
4580 		if (ret)
4581 			pi->caps_dynamic_ac_timing = false;
4582 	}
4583 	if (pi->dynamic_ss)
4584 		ci_enable_spread_spectrum(rdev, true);
4585 	if (pi->thermal_protection)
4586 		ci_enable_thermal_protection(rdev, true);
4587 	ci_program_sstp(rdev);
4588 	ci_enable_display_gap(rdev);
4589 	ci_program_vc(rdev);
4590 	ret = ci_upload_firmware(rdev);
4591 	if (ret) {
4592 		DRM_ERROR("ci_upload_firmware failed\n");
4593 		return ret;
4594 	}
4595 	ret = ci_process_firmware_header(rdev);
4596 	if (ret) {
4597 		DRM_ERROR("ci_process_firmware_header failed\n");
4598 		return ret;
4599 	}
4600 	ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
4601 	if (ret) {
4602 		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
4603 		return ret;
4604 	}
4605 	ret = ci_init_smc_table(rdev);
4606 	if (ret) {
4607 		DRM_ERROR("ci_init_smc_table failed\n");
4608 		return ret;
4609 	}
4610 	ret = ci_init_arb_table_index(rdev);
4611 	if (ret) {
4612 		DRM_ERROR("ci_init_arb_table_index failed\n");
4613 		return ret;
4614 	}
4615 	if (pi->caps_dynamic_ac_timing) {
4616 		ret = ci_populate_initial_mc_reg_table(rdev);
4617 		if (ret) {
4618 			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
4619 			return ret;
4620 		}
4621 	}
4622 	ret = ci_populate_pm_base(rdev);
4623 	if (ret) {
4624 		DRM_ERROR("ci_populate_pm_base failed\n");
4625 		return ret;
4626 	}
4627 	ci_dpm_start_smc(rdev);
4628 	ci_enable_vr_hot_gpio_interrupt(rdev);
4629 	ret = ci_notify_smc_display_change(rdev, false);
4630 	if (ret) {
4631 		DRM_ERROR("ci_notify_smc_display_change failed\n");
4632 		return ret;
4633 	}
4634 	ci_enable_sclk_control(rdev, true);
4635 	ret = ci_enable_ulv(rdev, true);
4636 	if (ret) {
4637 		DRM_ERROR("ci_enable_ulv failed\n");
4638 		return ret;
4639 	}
4640 	ret = ci_enable_ds_master_switch(rdev, true);
4641 	if (ret) {
4642 		DRM_ERROR("ci_enable_ds_master_switch failed\n");
4643 		return ret;
4644 	}
4645 	ret = ci_start_dpm(rdev);
4646 	if (ret) {
4647 		DRM_ERROR("ci_start_dpm failed\n");
4648 		return ret;
4649 	}
4650 	ret = ci_enable_didt(rdev, true);
4651 	if (ret) {
4652 		DRM_ERROR("ci_enable_didt failed\n");
4653 		return ret;
4654 	}
4655 	ret = ci_enable_smc_cac(rdev, true);
4656 	if (ret) {
4657 		DRM_ERROR("ci_enable_smc_cac failed\n");
4658 		return ret;
4659 	}
4660 	ret = ci_enable_power_containment(rdev, true);
4661 	if (ret) {
4662 		DRM_ERROR("ci_enable_power_containment failed\n");
4663 		return ret;
4664 	}
4665 
4666 	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
4667 
4668 	ci_update_current_ps(rdev, boot_ps);
4669 
4670 	return 0;
4671 }
4672 
4673 int ci_dpm_late_enable(struct radeon_device *rdev)
4674 {
4675 	int ret;
4676 
4677 	if (rdev->irq.installed &&
4678 	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
4679 #if 0
4680 		PPSMC_Result result;
4681 #endif
4682 		ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
4683 		if (ret) {
4684 			DRM_ERROR("ci_set_thermal_temperature_range failed\n");
4685 			return ret;
4686 		}
4687 		rdev->irq.dpm_thermal = true;
4688 		radeon_irq_set(rdev);
4689 #if 0
4690 		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
4691 
4692 		if (result != PPSMC_Result_OK)
4693 			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
4694 #endif
4695 	}
4696 
4697 	ci_dpm_powergate_uvd(rdev, true);
4698 
4699 	return 0;
4700 }
4701 
4702 void ci_dpm_disable(struct radeon_device *rdev)
4703 {
4704 	struct ci_power_info *pi = ci_get_pi(rdev);
4705 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4706 
4707 	ci_dpm_powergate_uvd(rdev, false);
4708 
4709 	if (!ci_is_smc_running(rdev))
4710 		return;
4711 
4712 	if (pi->thermal_protection)
4713 		ci_enable_thermal_protection(rdev, false);
4714 	ci_enable_power_containment(rdev, false);
4715 	ci_enable_smc_cac(rdev, false);
4716 	ci_enable_didt(rdev, false);
4717 	ci_enable_spread_spectrum(rdev, false);
4718 	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
4719 	ci_stop_dpm(rdev);
4720 	ci_enable_ds_master_switch(rdev, true);
4721 	ci_enable_ulv(rdev, false);
4722 	ci_clear_vc(rdev);
4723 	ci_reset_to_default(rdev);
4724 	ci_dpm_stop_smc(rdev);
4725 	ci_force_switch_to_arb_f0(rdev);
4726 
4727 	ci_update_current_ps(rdev, boot_ps);
4728 }
4729 
4730 int ci_dpm_set_power_state(struct radeon_device *rdev)
4731 {
4732 	struct ci_power_info *pi = ci_get_pi(rdev);
4733 	struct radeon_ps *new_ps = &pi->requested_rps;
4734 	struct radeon_ps *old_ps = &pi->current_rps;
4735 	int ret;
4736 
4737 	ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
4738 	if (pi->pcie_performance_request)
4739 		ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
4740 	ret = ci_freeze_sclk_mclk_dpm(rdev);
4741 	if (ret) {
4742 		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
4743 		return ret;
4744 	}
4745 	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
4746 	if (ret) {
4747 		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
4748 		return ret;
4749 	}
4750 	ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
4751 	if (ret) {
4752 		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
4753 		return ret;
4754 	}
4755 #if 0
4756 	ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
4757 	if (ret) {
4758 		DRM_ERROR("ci_update_vce_dpm failed\n");
4759 		return ret;
4760 	}
4761 #endif
4762 	ret = ci_update_sclk_t(rdev);
4763 	if (ret) {
4764 		DRM_ERROR("ci_update_sclk_t failed\n");
4765 		return ret;
4766 	}
4767 	if (pi->caps_dynamic_ac_timing) {
4768 		ret = ci_update_and_upload_mc_reg_table(rdev);
4769 		if (ret) {
4770 			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
4771 			return ret;
4772 		}
4773 	}
4774 	ret = ci_program_memory_timing_parameters(rdev);
4775 	if (ret) {
4776 		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
4777 		return ret;
4778 	}
4779 	ret = ci_unfreeze_sclk_mclk_dpm(rdev);
4780 	if (ret) {
4781 		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
4782 		return ret;
4783 	}
4784 	ret = ci_upload_dpm_level_enable_mask(rdev);
4785 	if (ret) {
4786 		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
4787 		return ret;
4788 	}
4789 	if (pi->pcie_performance_request)
4790 		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
4791 
4792 	return 0;
4793 }
4794 
4795 int ci_dpm_power_control_set_level(struct radeon_device *rdev)
4796 {
4797 	return ci_power_control_set_level(rdev);
4798 }
4799 
4800 void ci_dpm_reset_asic(struct radeon_device *rdev)
4801 {
4802 	ci_set_boot_state(rdev);
4803 }
4804 
4805 void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
4806 {
4807 	ci_program_display_gap(rdev);
4808 }
4809 
4810 union power_info {
4811 	struct _ATOM_POWERPLAY_INFO info;
4812 	struct _ATOM_POWERPLAY_INFO_V2 info_2;
4813 	struct _ATOM_POWERPLAY_INFO_V3 info_3;
4814 	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
4815 	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
4816 	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
4817 };
4818 
4819 union pplib_clock_info {
4820 	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
4821 	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
4822 	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
4823 	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
4824 	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
4825 	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
4826 };
4827 
4828 union pplib_power_state {
4829 	struct _ATOM_PPLIB_STATE v1;
4830 	struct _ATOM_PPLIB_STATE_V2 v2;
4831 };
4832 
4833 static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
4834 					  struct radeon_ps *rps,
4835 					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
4836 					  u8 table_rev)
4837 {
4838 	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
4839 	rps->class = le16_to_cpu(non_clock_info->usClassification);
4840 	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
4841 
4842 	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
4843 		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
4844 		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
4845 	} else {
4846 		rps->vclk = 0;
4847 		rps->dclk = 0;
4848 	}
4849 
4850 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
4851 		rdev->pm.dpm.boot_ps = rps;
4852 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
4853 		rdev->pm.dpm.uvd_ps = rps;
4854 }
4855 
4856 static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
4857 				      struct radeon_ps *rps, int index,
4858 				      union pplib_clock_info *clock_info)
4859 {
4860 	struct ci_power_info *pi = ci_get_pi(rdev);
4861 	struct ci_ps *ps = ci_get_ps(rps);
4862 	struct ci_pl *pl = &ps->performance_levels[index];
4863 
4864 	ps->performance_level_count = index + 1;
4865 
4866 	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
4867 	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
4868 	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
4869 	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
4870 
4871 	pl->pcie_gen = r600_get_pcie_gen_support(rdev,
4872 						 pi->sys_pcie_mask,
4873 						 pi->vbios_boot_state.pcie_gen_bootup_value,
4874 						 clock_info->ci.ucPCIEGen);
4875 	pl->pcie_lane = r600_get_pcie_lane_support(rdev,
4876 						   pi->vbios_boot_state.pcie_lane_bootup_value,
4877 						   le16_to_cpu(clock_info->ci.usPCIELane));
4878 
4879 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
4880 		pi->acpi_pcie_gen = pl->pcie_gen;
4881 	}
4882 
4883 	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
4884 		pi->ulv.supported = true;
4885 		pi->ulv.pl = *pl;
4886 		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
4887 	}
4888 
4889 	/* patch up boot state */
4890 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
4891 		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
4892 		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
4893 		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
4894 		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
4895 	}
4896 
4897 	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
4898 	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
4899 		pi->use_pcie_powersaving_levels = true;
4900 		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
4901 			pi->pcie_gen_powersaving.max = pl->pcie_gen;
4902 		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
4903 			pi->pcie_gen_powersaving.min = pl->pcie_gen;
4904 		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
4905 			pi->pcie_lane_powersaving.max = pl->pcie_lane;
4906 		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
4907 			pi->pcie_lane_powersaving.min = pl->pcie_lane;
4908 		break;
4909 	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
4910 		pi->use_pcie_performance_levels = true;
4911 		if (pi->pcie_gen_performance.max < pl->pcie_gen)
4912 			pi->pcie_gen_performance.max = pl->pcie_gen;
4913 		if (pi->pcie_gen_performance.min > pl->pcie_gen)
4914 			pi->pcie_gen_performance.min = pl->pcie_gen;
4915 		if (pi->pcie_lane_performance.max < pl->pcie_lane)
4916 			pi->pcie_lane_performance.max = pl->pcie_lane;
4917 		if (pi->pcie_lane_performance.min > pl->pcie_lane)
4918 			pi->pcie_lane_performance.min = pl->pcie_lane;
4919 		break;
4920 	default:
4921 		break;
4922 	}
4923 }
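/*
 * The min/max bookkeeping above relies on the deliberately inverted seed
 * values set in ci_dpm_init() (max seeded with GEN1/0 lanes, min with
 * GEN3/16 lanes), so the comparisons converge on the real range as each
 * power state in the table is parsed.
 */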
4924 
4925 static int ci_parse_power_table(struct radeon_device *rdev)
4926 {
4927 	struct radeon_mode_info *mode_info = &rdev->mode_info;
4928 	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
4929 	union pplib_power_state *power_state;
4930 	int i, j, k, non_clock_array_index, clock_array_index;
4931 	union pplib_clock_info *clock_info;
4932 	struct _StateArray *state_array;
4933 	struct _ClockInfoArray *clock_info_array;
4934 	struct _NonClockInfoArray *non_clock_info_array;
4935 	union power_info *power_info;
4936 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
4937 	u16 data_offset;
4938 	u8 frev, crev;
4939 	u8 *power_state_offset;
4940 	struct ci_ps *ps;
4941 
4942 	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
4943 				   &frev, &crev, &data_offset))
4944 		return -EINVAL;
4945 	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
4946 
4947 	state_array = (struct _StateArray *)
4948 		(mode_info->atom_context->bios + data_offset +
4949 		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
4950 	clock_info_array = (struct _ClockInfoArray *)
4951 		(mode_info->atom_context->bios + data_offset +
4952 		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
4953 	non_clock_info_array = (struct _NonClockInfoArray *)
4954 		(mode_info->atom_context->bios + data_offset +
4955 		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
4956 
4957 	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
4958 				  sizeof(struct radeon_ps), GFP_KERNEL);
4959 	if (!rdev->pm.dpm.ps)
4960 		return -ENOMEM;
4961 	power_state_offset = (u8 *)state_array->states;
4962 	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
4963 	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
4964 	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
4965 	for (i = 0; i < state_array->ucNumEntries; i++) {
4966 		u8 *idx;
4967 		power_state = (union pplib_power_state *)power_state_offset;
4968 		non_clock_array_index = power_state->v2.nonClockInfoIndex;
4969 		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
4970 			&non_clock_info_array->nonClockInfo[non_clock_array_index];
4971 		if (!rdev->pm.power_state[i].clock_info)
4972 			return -EINVAL;
4973 		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
		/* Do not free rdev->pm.dpm.ps on error here: both failure
		 * returns rely on the caller running ci_dpm_fini(), which
		 * frees dpm.ps and, via num_ps, every ps_priv allocated in
		 * earlier iterations.  Freeing it here as well would make
		 * ci_dpm_fini() double-free it.
		 */
4974 		if (ps == NULL)
4976 			return -ENOMEM;
4978 		rdev->pm.dpm.ps[i].ps_priv = ps;
4979 		ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
4980 					      non_clock_info,
4981 					      non_clock_info_array->ucEntrySize);
4982 		k = 0;
4983 		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
4984 		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
4985 			clock_array_index = idx[j];
4986 			if (clock_array_index >= clock_info_array->ucNumEntries)
4987 				continue;
4988 			if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
4989 				break;
4990 			clock_info = (union pplib_clock_info *)
4991 				((u8 *)&clock_info_array->clockInfo[0] +
4992 				 (clock_array_index * clock_info_array->ucEntrySize));
4993 			ci_parse_pplib_clock_info(rdev,
4994 						  &rdev->pm.dpm.ps[i], k,
4995 						  clock_info);
4996 			k++;
4997 		}
4998 		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
		/* update num_ps as each state is parsed so that ci_dpm_fini()
		 * frees exactly the ps_priv blocks allocated so far if a
		 * later iteration fails
		 */
		rdev->pm.dpm.num_ps = i + 1;
4999 	}
5001 	return 0;
5002 }
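/*
 * The state walk above advances by a variable stride because each
 * _ATOM_PPLIB_STATE_V2 entry is variable-length:
 *
 *	byte 0           ucNumDPMLevels (N)
 *	byte 1           nonClockInfoIndex
 *	bytes 2..2+N-1   clockInfoIndex[N]
 *
 * hence power_state_offset += 2 + ucNumDPMLevels per state.
 */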
5003 
5004 static int ci_get_vbios_boot_values(struct radeon_device *rdev,
5005 				    struct ci_vbios_boot_state *boot_state)
5006 {
5007 	struct radeon_mode_info *mode_info = &rdev->mode_info;
5008 	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5009 	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5010 	u8 frev, crev;
5011 	u16 data_offset;
5012 
5013 	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
5014 				   &frev, &crev, &data_offset)) {
5015 		firmware_info =
5016 			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5017 						    data_offset);
5018 		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5019 		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5020 		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5021 		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
5022 		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
5023 		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5024 		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5025 
5026 		return 0;
5027 	}
5028 	return -EINVAL;
5029 }
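/*
 * Boot-up clocks and voltages come straight from the FirmwareInfo table
 * (clocks in 10 kHz units, voltages in mV), while the PCIe gen/lane values
 * are read back from the live link configuration; these are the values the
 * boot state gets patched with in ci_parse_pplib_clock_info().
 */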
5030 
5031 void ci_dpm_fini(struct radeon_device *rdev)
5032 {
5033 	int i;
5034 
5035 	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
5036 		kfree(rdev->pm.dpm.ps[i].ps_priv);
5037 	}
5038 	kfree(rdev->pm.dpm.ps);
5039 	kfree(rdev->pm.dpm.priv);
5040 	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5041 	r600_free_extended_power_table(rdev);
5042 }
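/*
 * ci_dpm_fini() doubles as the error-path cleanup for ci_dpm_init(): it
 * frees whatever has been allocated so far, trusting num_ps to reflect how
 * many ps_priv blocks ci_parse_power_table() actually created.
 */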
5043 
5044 int ci_dpm_init(struct radeon_device *rdev)
5045 {
5046 	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5047 	u16 data_offset, size;
5048 	u8 frev, crev;
5049 	struct ci_power_info *pi;
5050 	int ret;
5051 	u32 mask;
5052 
5053 	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5054 	if (pi == NULL)
5055 		return -ENOMEM;
5056 	rdev->pm.dpm.priv = pi;
5057 
5058 	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
5059 	if (ret)
5060 		pi->sys_pcie_mask = 0;
5061 	else
5062 		pi->sys_pcie_mask = mask;
5063 	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
5064 
5065 	pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
5066 	pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
5067 	pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
5068 	pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
5069 
5070 	pi->pcie_lane_performance.max = 0;
5071 	pi->pcie_lane_performance.min = 16;
5072 	pi->pcie_lane_powersaving.max = 0;
5073 	pi->pcie_lane_powersaving.min = 16;
5074 
5075 	ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
5076 	if (ret) {
5077 		ci_dpm_fini(rdev);
5078 		return ret;
5079 	}
5080 	ret = ci_parse_power_table(rdev);
5081 	if (ret) {
5082 		ci_dpm_fini(rdev);
5083 		return ret;
5084 	}
5085 	ret = r600_parse_extended_power_table(rdev);
5086 	if (ret) {
5087 		ci_dpm_fini(rdev);
5088 		return ret;
5089 	}
5090 
5091 	pi->dll_default_on = false;
5092 	pi->sram_end = SMC_RAM_END;
5093 
5094 	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5095 	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5096 	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5097 	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5098 	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5099 	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5100 	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5101 	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5102 
5103 	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5104 
5105 	pi->sclk_dpm_key_disabled = 0;
5106 	pi->mclk_dpm_key_disabled = 0;
5107 	pi->pcie_dpm_key_disabled = 0;
5108 
5109 	pi->caps_sclk_ds = true;
5110 
5111 	pi->mclk_strobe_mode_threshold = 40000;
5112 	pi->mclk_stutter_mode_threshold = 40000;
5113 	pi->mclk_edc_enable_threshold = 40000;
5114 	pi->mclk_edc_wr_enable_threshold = 40000;
5115 
5116 	ci_initialize_powertune_defaults(rdev);
5117 
5118 	pi->caps_fps = false;
5119 
5120 	pi->caps_sclk_throttle_low_notification = false;
5121 
5122 	pi->caps_uvd_dpm = true;
5123 
5124 	ci_get_leakage_voltages(rdev);
5125 	ci_patch_dependency_tables_with_leakage(rdev);
5126 	ci_set_private_data_variables_based_on_pptable(rdev);
5127 
5128 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5129 		kcalloc(4, sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
5130 	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5131 		ci_dpm_fini(rdev);
5132 		return -ENOMEM;
5133 	}
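	/*
	 * Hardcoded dispclk -> VDDC ladder; following the convention used
	 * elsewhere in this file, the clocks below are in 10 kHz units and
	 * the voltages in mV, e.g. the 36000 entry maps a 360 MHz display
	 * clock to a 720 mV floor.
	 */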
5134 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5135 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5136 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5137 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5138 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5139 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5140 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5141 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5142 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5143 
5144 	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5145 	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5146 	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5147 
5148 	rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5149 	rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5150 	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5151 	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5152 
5153 	if (rdev->family == CHIP_HAWAII) {
5154 		pi->thermal_temp_setting.temperature_low = 94500;
5155 		pi->thermal_temp_setting.temperature_high = 95000;
5156 		pi->thermal_temp_setting.temperature_shutdown = 104000;
5157 	} else {
5158 		pi->thermal_temp_setting.temperature_low = 99500;
5159 		pi->thermal_temp_setting.temperature_high = 100000;
5160 		pi->thermal_temp_setting.temperature_shutdown = 104000;
5161 	}
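	/*
	 * Thermal trip points are in millidegrees Celsius: Hawaii throttles
	 * between 94.5 and 95 degrees C, other CI parts between 99.5 and
	 * 100, and both shut down at 104.
	 */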
5162 
5163 	pi->uvd_enabled = false;
5164 
5165 	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5166 	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5167 	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5168 	if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
5169 		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5170 	else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
5171 		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5172 
5173 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
5174 		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
5175 			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5176 		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
5177 			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5178 		else
5179 			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
5180 	}
5181 
5182 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
5183 		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
5184 			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5185 		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
5186 			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5187 		else
5188 			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
5189 	}
5190 
5191 	pi->vddc_phase_shed_control = true;
5192 
5193 #if defined(CONFIG_ACPI)
5194 	pi->pcie_performance_request =
5195 		radeon_acpi_is_pcie_performance_request_supported(rdev);
5196 #else
5197 	pi->pcie_performance_request = false;
5198 #endif
5199 
5200 	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
5201 				   &frev, &crev, &data_offset)) {
5202 		pi->caps_sclk_ss_support = true;
5203 		pi->caps_mclk_ss_support = true;
5204 		pi->dynamic_ss = true;
5205 	} else {
5206 		pi->caps_sclk_ss_support = false;
5207 		pi->caps_mclk_ss_support = false;
5208 		pi->dynamic_ss = true;
5209 	}
5210 
5211 	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
5212 		pi->thermal_protection = true;
5213 	else
5214 		pi->thermal_protection = false;
5215 
5216 	pi->caps_dynamic_ac_timing = true;
5217 
5218 	pi->uvd_power_gated = false;
5219 
5220 	/* make sure dc limits are valid */
5221 	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
5222 	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
5223 		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
5224 			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
5225 
5226 	return 0;
5227 }
5228 
5229 void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
5230 						    struct seq_file *m)
5231 {
5232 	u32 sclk = ci_get_average_sclk_freq(rdev);
5233 	u32 mclk = ci_get_average_mclk_freq(rdev);
5234 
5235 	seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
5236 		   sclk, mclk);
5237 }
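/*
 * The averages are read back from the SMC rather than computed by the
 * driver; like the other dpm clocks in this file they are in 10 kHz units,
 * so "sclk: 100000" means an average engine clock of 1 GHz.
 */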
5238 
5239 void ci_dpm_print_power_state(struct radeon_device *rdev,
5240 			      struct radeon_ps *rps)
5241 {
5242 	struct ci_ps *ps = ci_get_ps(rps);
5243 	struct ci_pl *pl;
5244 	int i;
5245 
5246 	r600_dpm_print_class_info(rps->class, rps->class2);
5247 	r600_dpm_print_cap_info(rps->caps);
5248 	printk("\tuvd    vclk: %u dclk: %u\n", rps->vclk, rps->dclk);
5249 	for (i = 0; i < ps->performance_level_count; i++) {
5250 		pl = &ps->performance_levels[i];
5251 		printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
5252 		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
5253 	}
5254 	r600_dpm_print_ps_status(rdev, rps);
5255 }
5256 
5257 u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
5258 {
5259 	struct ci_power_info *pi = ci_get_pi(rdev);
5260 	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5261 
5262 	if (low)
5263 		return requested_state->performance_levels[0].sclk;
5264 	else
5265 		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
5266 }
5267 
5268 u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
5269 {
5270 	struct ci_power_info *pi = ci_get_pi(rdev);
5271 	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5272 
5273 	if (low)
5274 		return requested_state->performance_levels[0].mclk;
5275 	else
5276 		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
5277 }
5278