xref: /linux/drivers/gpu/drm/radeon/ci_dpm.c (revision 04eeb606a8383b306f4bc6991da8231b5f3924b0)
1 /*
2  * Copyright 2013 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 #include "drmP.h"
26 #include "radeon.h"
27 #include "radeon_ucode.h"
28 #include "cikd.h"
29 #include "r600_dpm.h"
30 #include "ci_dpm.h"
31 #include "atom.h"
32 #include <linux/seq_file.h>
33 
34 #define MC_CG_ARB_FREQ_F0           0x0a
35 #define MC_CG_ARB_FREQ_F1           0x0b
36 #define MC_CG_ARB_FREQ_F2           0x0c
37 #define MC_CG_ARB_FREQ_F3           0x0d
38 
39 #define SMC_RAM_END 0x40000
40 
41 #define VOLTAGE_SCALE               4
42 #define VOLTAGE_VID_OFFSET_SCALE1    625
43 #define VOLTAGE_VID_OFFSET_SCALE2    100
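
/*
 * Voltage scaling: the powerplay tables store voltages in mV, while
 * the SMC appears to work in 0.25 mV units, hence VOLTAGE_SCALE = 4
 * (e.g. 1150 mV -> 4600).  The 625/100 pair presumably encodes the
 * 6.25 mV SVI2 VID step as an integer ratio.
 */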
44 
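/*
 * Per-ASIC PowerTune defaults.  Based on how the ci_populate_*()
 * helpers below consume them, the scalar fields appear to be:
 * svi_load_line_en, svi_load_line_vddc,
 * tdc_vddc_throttle_release_limit_perc, tdc_mawt, tdc_waterfall_ctl,
 * dte_ambient_temp_base, display_cac and bapm_temp_gradient, followed
 * by the bapmti_r and bapmti_rc arrays (one entry per DTE
 * iteration/source/sink combination).
 */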
45 static const struct ci_pt_defaults defaults_hawaii_xt =
46 {
47 	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
48 	{ 0x84,  0x0,   0x0,   0x7F,  0x0,   0x0,   0x5A,  0x60,  0x51,  0x8E,  0x79,  0x6B,  0x5F,  0x90,  0x79  },
49 	{ 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
50 };
51 
52 static const struct ci_pt_defaults defaults_hawaii_pro =
53 {
54 	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
55 	{ 0x93,  0x0,   0x0,   0x97,  0x0,   0x0,   0x6B,  0x60,  0x51,  0x95,  0x79,  0x6B,  0x5F,  0x90,  0x79  },
56 	{ 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
57 };
58 
59 static const struct ci_pt_defaults defaults_bonaire_xt =
60 {
61 	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
62 	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
63 	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
64 };
65 
66 static const struct ci_pt_defaults defaults_bonaire_pro =
67 {
68 	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
69 	{ 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
70 	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
71 };
72 
73 static const struct ci_pt_defaults defaults_saturn_xt =
74 {
75 	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
76 	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
77 	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
78 };
79 
80 static const struct ci_pt_defaults defaults_saturn_pro =
81 {
82 	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
83 	{ 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
84 	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
85 };
86 
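/*
 * DIDT (di/dt throttling) setup table, applied by
 * ci_program_pt_config_registers() below.  Each entry is
 * { offset, mask, shift, value, type }: the register at 'offset' in
 * the DIDT indirect space is read-modify-written so that the masked
 * field becomes (value << shift) & mask.  The register groups at
 * 0x00-0x12, 0x20-0x32, 0x40-0x52 and 0x60-0x72 appear to program the
 * SQ, DB, TD and TCP blocks respectively; the { 0xFFFFFFFF } entry
 * terminates the table.
 */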
87 static const struct ci_pt_config_reg didt_config_ci[] =
88 {
89 	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
90 	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
91 	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
92 	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
93 	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
94 	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
95 	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
96 	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
97 	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
98 	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
99 	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
100 	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
101 	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
102 	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
103 	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
104 	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
105 	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
106 	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
107 	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
108 	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
109 	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
110 	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
111 	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
112 	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
113 	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
114 	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
115 	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
116 	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
117 	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
118 	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
119 	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
120 	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
121 	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
122 	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
123 	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
124 	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
125 	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
126 	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
127 	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
128 	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
129 	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
130 	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
131 	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
132 	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
133 	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
134 	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
135 	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
136 	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
137 	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
138 	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
139 	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
140 	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
141 	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
142 	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
143 	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
144 	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
145 	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
146 	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
147 	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
148 	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
149 	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
150 	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
151 	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
152 	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
153 	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
154 	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
155 	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
156 	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
157 	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
158 	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
159 	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
160 	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
161 	{ 0xFFFFFFFF }
162 };
163 
164 extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
165 extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
166 				       u32 arb_freq_src, u32 arb_freq_dest);
167 extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
168 extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
169 extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
170 						     u32 max_voltage_steps,
171 						     struct atom_voltage_table *voltage_table);
172 extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
173 extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
174 extern int ci_mc_load_microcode(struct radeon_device *rdev);
175 extern void cik_update_cg(struct radeon_device *rdev,
176 			  u32 block, bool enable);
177 
178 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
179 					 struct atom_voltage_table_entry *voltage_table,
180 					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
181 static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
182 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
183 				       u32 target_tdp);
184 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
185 
186 static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
187 {
188 	struct ci_power_info *pi = rdev->pm.dpm.priv;
189 
190 	return pi;
191 }
192 
193 static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
194 {
195 	struct ci_ps *ps = rps->ps_priv;
196 
197 	return ps;
198 }
199 
200 static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
201 {
202 	struct ci_power_info *pi = ci_get_pi(rdev);
203 
204 	switch (rdev->pdev->device) {
205 	case 0x6649:
206 	case 0x6650:
207 	case 0x6651:
208 	case 0x6658:
209 	case 0x665C:
210 	case 0x665D:
211 	default:
212 		pi->powertune_defaults = &defaults_bonaire_xt;
213 		break;
214 	case 0x6640:
215 	case 0x6641:
216 	case 0x6646:
217 	case 0x6647:
218 		pi->powertune_defaults = &defaults_saturn_xt;
219 		break;
220 	case 0x67B8:
221 	case 0x67B0:
222 		pi->powertune_defaults = &defaults_hawaii_xt;
223 		break;
224 	case 0x67BA:
225 	case 0x67B1:
226 		pi->powertune_defaults = &defaults_hawaii_pro;
227 		break;
228 	case 0x67A0:
229 	case 0x67A1:
230 	case 0x67A2:
231 	case 0x67A8:
232 	case 0x67A9:
233 	case 0x67AA:
234 	case 0x67B9:
235 	case 0x67BE:
236 		pi->powertune_defaults = &defaults_bonaire_xt;
237 		break;
238 	}
239 
240 	pi->dte_tj_offset = 0;
241 
242 	pi->caps_power_containment = true;
243 	pi->caps_cac = false;
244 	pi->caps_sq_ramping = false;
245 	pi->caps_db_ramping = false;
246 	pi->caps_td_ramping = false;
247 	pi->caps_tcp_ramping = false;
248 
249 	if (pi->caps_power_containment) {
250 		pi->caps_cac = true;
251 		pi->enable_bapm_feature = true;
252 		pi->enable_tdc_limit_feature = true;
253 		pi->enable_pkg_pwr_tracking_feature = true;
254 	}
255 }
256 
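/*
 * Convert a VDDC value (in mV, as stored in the leakage/voltage
 * tables) to an SVI2 VID code.  SVI2 encodes voltage as
 * 1.55 V - VID * 6.25 mV, so the inverse is computed in fixed point
 * as (6200 - vddc * 4) / 25.  E.g. vddc = 1150:
 * (6200 - 4600) / 25 = 64, and 1.55 V - 64 * 6.25 mV = 1.15 V.
 */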
257 static u8 ci_convert_to_vid(u16 vddc)
258 {
259 	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
260 }
261 
262 static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
263 {
264 	struct ci_power_info *pi = ci_get_pi(rdev);
265 	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
266 	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
267 	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
268 	u32 i;
269 
270 	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
271 		return -EINVAL;
272 	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
273 		return -EINVAL;
274 	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
275 	    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
276 		return -EINVAL;
277 
278 	for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
279 		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
280 			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
281 			hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
282 			hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
283 		} else {
284 			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
285 			hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
286 		}
287 	}
288 	return 0;
289 }
290 
291 static int ci_populate_vddc_vid(struct radeon_device *rdev)
292 {
293 	struct ci_power_info *pi = ci_get_pi(rdev);
294 	u8 *vid = pi->smc_powertune_table.VddCVid;
295 	u32 i;
296 
297 	if (pi->vddc_voltage_table.count > 8)
298 		return -EINVAL;
299 
300 	for (i = 0; i < pi->vddc_voltage_table.count; i++)
301 		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
302 
303 	return 0;
304 }
305 
306 static int ci_populate_svi_load_line(struct radeon_device *rdev)
307 {
308 	struct ci_power_info *pi = ci_get_pi(rdev);
309 	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
310 
311 	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
312 	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
313 	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
314 	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
315 
316 	return 0;
317 }
318 
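/*
 * Limits exchanged with the SMC are in unsigned 8.8 fixed point,
 * hence the '* 256' conversions here and in the functions below
 * (e.g. a table value of 100 is sent as 100 * 256 = 25600).
 */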
319 static int ci_populate_tdc_limit(struct radeon_device *rdev)
320 {
321 	struct ci_power_info *pi = ci_get_pi(rdev);
322 	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
323 	u16 tdc_limit;
324 
325 	tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
326 	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
327 	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
328 		pt_defaults->tdc_vddc_throttle_release_limit_perc;
329 	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
330 
331 	return 0;
332 }
333 
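/*
 * "dw8" is the dword in the SMC PmFuses table that starts at
 * TdcWaterfallCtl.  Note that the u32 read below fills
 * TdcWaterfallCtl plus the three fuse bytes that follow it with the
 * SMC's current contents, and only TdcWaterfallCtl is then forced to
 * the per-ASIC default; on a failed read the function bails out
 * instead.
 */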
334 static int ci_populate_dw8(struct radeon_device *rdev)
335 {
336 	struct ci_power_info *pi = ci_get_pi(rdev);
337 	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
338 	int ret;
339 
340 	ret = ci_read_smc_sram_dword(rdev,
341 				     SMU7_FIRMWARE_HEADER_LOCATION +
342 				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
343 				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
344 				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
345 				     pi->sram_end);
346 	if (ret)
347 		return -EINVAL;
348 
349 	pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
350 
351 	return 0;
352 }
353 
354 static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
355 {
356 	struct ci_power_info *pi = ci_get_pi(rdev);
357 	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
358 	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
359 	int i, min, max;
360 
361 	min = max = hi_vid[0];
362 	for (i = 0; i < 8; i++) {
363 		if (0 != hi_vid[i]) {
364 			if (min > hi_vid[i])
365 				min = hi_vid[i];
366 			if (max < hi_vid[i])
367 				max = hi_vid[i];
368 		}
369 
370 		if (0 != lo_vid[i]) {
371 			if (min > lo_vid[i])
372 				min = lo_vid[i];
373 			if (max < lo_vid[i])
374 				max = lo_vid[i];
375 		}
376 	}
377 
378 	if ((min == 0) || (max == 0))
379 		return -EINVAL;
380 	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
381 	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
382 
383 	return 0;
384 }
385 
386 static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
387 {
388 	struct ci_power_info *pi = ci_get_pi(rdev);
389 	u16 hi_sidd;
390 	u16 lo_sidd;
391 	struct radeon_cac_tdp_table *cac_tdp_table =
392 		rdev->pm.dpm.dyn_state.cac_tdp_table;
393 
394 	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
395 	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
396 
397 	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
398 	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
399 
400 	return 0;
401 }
402 
403 static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
404 {
405 	struct ci_power_info *pi = ci_get_pi(rdev);
406 	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
407 	SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
408 	struct radeon_cac_tdp_table *cac_tdp_table =
409 		rdev->pm.dpm.dyn_state.cac_tdp_table;
410 	struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
411 	int i, j, k;
412 	const u16 *def1;
413 	const u16 *def2;
414 
415 	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
416 	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;
417 
418 	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
419 	dpm_table->GpuTjMax =
420 		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
421 	dpm_table->GpuTjHyst = 8;
422 
423 	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;
424 
425 	if (ppm) {
426 		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
427 		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
428 	} else {
429 		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
430 		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
431 	}
432 
433 	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
434 	def1 = pt_defaults->bapmti_r;
435 	def2 = pt_defaults->bapmti_rc;
436 
437 	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
438 		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
439 			for (k = 0; k < SMU7_DTE_SINKS; k++) {
440 				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
441 				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
442 				def1++;
443 				def2++;
444 			}
445 		}
446 	}
447 
448 	return 0;
449 }
450 
451 static int ci_populate_pm_base(struct radeon_device *rdev)
452 {
453 	struct ci_power_info *pi = ci_get_pi(rdev);
454 	u32 pm_fuse_table_offset;
455 	int ret;
456 
457 	if (pi->caps_power_containment) {
458 		ret = ci_read_smc_sram_dword(rdev,
459 					     SMU7_FIRMWARE_HEADER_LOCATION +
460 					     offsetof(SMU7_Firmware_Header, PmFuseTable),
461 					     &pm_fuse_table_offset, pi->sram_end);
462 		if (ret)
463 			return ret;
464 		ret = ci_populate_bapm_vddc_vid_sidd(rdev);
465 		if (ret)
466 			return ret;
467 		ret = ci_populate_vddc_vid(rdev);
468 		if (ret)
469 			return ret;
470 		ret = ci_populate_svi_load_line(rdev);
471 		if (ret)
472 			return ret;
473 		ret = ci_populate_tdc_limit(rdev);
474 		if (ret)
475 			return ret;
476 		ret = ci_populate_dw8(rdev);
477 		if (ret)
478 			return ret;
479 		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
480 		if (ret)
481 			return ret;
482 		ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
483 		if (ret)
484 			return ret;
485 		ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
486 					   (u8 *)&pi->smc_powertune_table,
487 					   sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
488 		if (ret)
489 			return ret;
490 	}
491 
492 	return 0;
493 }
494 
495 static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
496 {
497 	struct ci_power_info *pi = ci_get_pi(rdev);
498 	u32 data;
499 
500 	if (pi->caps_sq_ramping) {
501 		data = RREG32_DIDT(DIDT_SQ_CTRL0);
502 		if (enable)
503 			data |= DIDT_CTRL_EN;
504 		else
505 			data &= ~DIDT_CTRL_EN;
506 		WREG32_DIDT(DIDT_SQ_CTRL0, data);
507 	}
508 
509 	if (pi->caps_db_ramping) {
510 		data = RREG32_DIDT(DIDT_DB_CTRL0);
511 		if (enable)
512 			data |= DIDT_CTRL_EN;
513 		else
514 			data &= ~DIDT_CTRL_EN;
515 		WREG32_DIDT(DIDT_DB_CTRL0, data);
516 	}
517 
518 	if (pi->caps_td_ramping) {
519 		data = RREG32_DIDT(DIDT_TD_CTRL0);
520 		if (enable)
521 			data |= DIDT_CTRL_EN;
522 		else
523 			data &= ~DIDT_CTRL_EN;
524 		WREG32_DIDT(DIDT_TD_CTRL0, data);
525 	}
526 
527 	if (pi->caps_tcp_ramping) {
528 		data = RREG32_DIDT(DIDT_TCP_CTRL0);
529 		if (enable)
530 			data |= DIDT_CTRL_EN;
531 		else
532 			data &= ~DIDT_CTRL_EN;
533 		WREG32_DIDT(DIDT_TCP_CTRL0, data);
534 	}
535 }
536 
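/*
 * Walk a { 0xFFFFFFFF }-terminated ci_pt_config_reg table.
 * CISLANDS_CONFIGREG_CACHE entries touch no hardware: their shifted
 * values are OR-accumulated into 'cache' and applied together with
 * the next non-cache entry, after which the accumulator is cleared.
 * All other entries are read-modify-write cycles in the SMC indirect,
 * DIDT indirect or normal register space.
 */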
537 static int ci_program_pt_config_registers(struct radeon_device *rdev,
538 					  const struct ci_pt_config_reg *cac_config_regs)
539 {
540 	const struct ci_pt_config_reg *config_regs = cac_config_regs;
541 	u32 data;
542 	u32 cache = 0;
543 
544 	if (config_regs == NULL)
545 		return -EINVAL;
546 
547 	while (config_regs->offset != 0xFFFFFFFF) {
548 		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
549 			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
550 		} else {
551 			switch (config_regs->type) {
552 			case CISLANDS_CONFIGREG_SMC_IND:
553 				data = RREG32_SMC(config_regs->offset);
554 				break;
555 			case CISLANDS_CONFIGREG_DIDT_IND:
556 				data = RREG32_DIDT(config_regs->offset);
557 				break;
558 			default:
559 				data = RREG32(config_regs->offset << 2);
560 				break;
561 			}
562 
563 			data &= ~config_regs->mask;
564 			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
565 			data |= cache;
566 
567 			switch (config_regs->type) {
568 			case CISLANDS_CONFIGREG_SMC_IND:
569 				WREG32_SMC(config_regs->offset, data);
570 				break;
571 			case CISLANDS_CONFIGREG_DIDT_IND:
572 				WREG32_DIDT(config_regs->offset, data);
573 				break;
574 			default:
575 				WREG32(config_regs->offset << 2, data);
576 				break;
577 			}
578 			cache = 0;
579 		}
580 		config_regs++;
581 	}
582 	return 0;
583 }
584 
585 static int ci_enable_didt(struct radeon_device *rdev, bool enable)
586 {
587 	struct ci_power_info *pi = ci_get_pi(rdev);
588 	int ret;
589 
590 	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
591 	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
592 		cik_enter_rlc_safe_mode(rdev);
593 
594 		if (enable) {
595 			ret = ci_program_pt_config_registers(rdev, didt_config_ci);
596 			if (ret) {
597 				cik_exit_rlc_safe_mode(rdev);
598 				return ret;
599 			}
600 		}
601 
602 		ci_do_enable_didt(rdev, enable);
603 
604 		cik_exit_rlc_safe_mode(rdev);
605 	}
606 
607 	return 0;
608 }
609 
610 static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
611 {
612 	struct ci_power_info *pi = ci_get_pi(rdev);
613 	PPSMC_Result smc_result;
614 	int ret = 0;
615 
616 	if (enable) {
617 		pi->power_containment_features = 0;
618 		if (pi->caps_power_containment) {
619 			if (pi->enable_bapm_feature) {
620 				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
621 				if (smc_result != PPSMC_Result_OK)
622 					ret = -EINVAL;
623 				else
624 					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
625 			}
626 
627 			if (pi->enable_tdc_limit_feature) {
628 				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
629 				if (smc_result != PPSMC_Result_OK)
630 					ret = -EINVAL;
631 				else
632 					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
633 			}
634 
635 			if (pi->enable_pkg_pwr_tracking_feature) {
636 				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
637 				if (smc_result != PPSMC_Result_OK) {
638 					ret = -EINVAL;
639 				} else {
640 					struct radeon_cac_tdp_table *cac_tdp_table =
641 						rdev->pm.dpm.dyn_state.cac_tdp_table;
642 					u32 default_pwr_limit =
643 						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
644 
645 					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;
646 
647 					ci_set_power_limit(rdev, default_pwr_limit);
648 				}
649 			}
650 		}
651 	} else {
652 		if (pi->caps_power_containment && pi->power_containment_features) {
653 			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
654 				ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);
655 
656 			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
657 				ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);
658 
659 			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
660 				ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
661 			pi->power_containment_features = 0;
662 		}
663 	}
664 
665 	return ret;
666 }
667 
668 static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
669 {
670 	struct ci_power_info *pi = ci_get_pi(rdev);
671 	PPSMC_Result smc_result;
672 	int ret = 0;
673 
674 	if (pi->caps_cac) {
675 		if (enable) {
676 			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
677 			if (smc_result != PPSMC_Result_OK) {
678 				ret = -EINVAL;
679 				pi->cac_enabled = false;
680 			} else {
681 				pi->cac_enabled = true;
682 			}
683 		} else if (pi->cac_enabled) {
684 			ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
685 			pi->cac_enabled = false;
686 		}
687 	}
688 
689 	return ret;
690 }
691 
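/*
 * Apply the user's percentage TDP adjustment to the configurable TDP
 * and hand the result to the SMC in 8.8 fixed point.  E.g. with
 * configurable_tdp = 150 and tdp_adjustment = 10, the polarity below
 * gives target_tdp = (100 - 10) * 150 / 100 = 135, sent as
 * 135 * 256 = 34560.
 */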
692 static int ci_power_control_set_level(struct radeon_device *rdev)
693 {
694 	struct ci_power_info *pi = ci_get_pi(rdev);
695 	struct radeon_cac_tdp_table *cac_tdp_table =
696 		rdev->pm.dpm.dyn_state.cac_tdp_table;
697 	s32 adjust_percent;
698 	s32 target_tdp;
699 	int ret = 0;
700 	bool adjust_polarity = false; /* ??? */
701 
702 	if (pi->caps_power_containment &&
703 	    (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
704 		adjust_percent = adjust_polarity ?
705 			rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
706 		target_tdp = ((100 + adjust_percent) *
707 			      (s32)cac_tdp_table->configurable_tdp) / 100;
708 		target_tdp *= 256;
709 
710 		ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
711 	}
712 
713 	return ret;
714 }
715 
716 void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
717 {
718 	struct ci_power_info *pi = ci_get_pi(rdev);
719 
720 	if (pi->uvd_power_gated == gate)
721 		return;
722 
723 	pi->uvd_power_gated = gate;
724 
725 	ci_update_uvd_dpm(rdev, gate);
726 }
727 
728 bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
729 {
730 	struct ci_power_info *pi = ci_get_pi(rdev);
731 	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
732 	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
733 
734 	return vblank_time < switch_limit;
735 }
740 
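/*
 * Clamp the requested power state to the current (AC or DC) limits.
 * With more than one active CRTC, or a vblank period too short to
 * hide a memory reclock, mclk switching is disabled and every
 * performance level is forced to the state's highest memory clock.
 */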
741 static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
742 					struct radeon_ps *rps)
743 {
744 	struct ci_ps *ps = ci_get_ps(rps);
745 	struct ci_power_info *pi = ci_get_pi(rdev);
746 	struct radeon_clock_and_voltage_limits *max_limits;
747 	bool disable_mclk_switching;
748 	u32 sclk, mclk;
749 	int i;
750 
751 	if (rps->vce_active) {
752 		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
753 		rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
754 	} else {
755 		rps->evclk = 0;
756 		rps->ecclk = 0;
757 	}
758 
759 	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
760 	    ci_dpm_vblank_too_short(rdev))
761 		disable_mclk_switching = true;
762 	else
763 		disable_mclk_switching = false;
764 
765 	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
766 		pi->battery_state = true;
767 	else
768 		pi->battery_state = false;
769 
770 	if (rdev->pm.dpm.ac_power)
771 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
772 	else
773 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
774 
775 	if (rdev->pm.dpm.ac_power == false) {
776 		for (i = 0; i < ps->performance_level_count; i++) {
777 			if (ps->performance_levels[i].mclk > max_limits->mclk)
778 				ps->performance_levels[i].mclk = max_limits->mclk;
779 			if (ps->performance_levels[i].sclk > max_limits->sclk)
780 				ps->performance_levels[i].sclk = max_limits->sclk;
781 		}
782 	}
783 
784 	/* XXX validate the min clocks required for display */
785 
786 	if (disable_mclk_switching) {
787 		mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
788 		sclk = ps->performance_levels[0].sclk;
789 	} else {
790 		mclk = ps->performance_levels[0].mclk;
791 		sclk = ps->performance_levels[0].sclk;
792 	}
793 
794 	if (rps->vce_active) {
795 		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
796 			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
797 		if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
798 			mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
799 	}
800 
801 	ps->performance_levels[0].sclk = sclk;
802 	ps->performance_levels[0].mclk = mclk;
803 
804 	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
805 		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;
806 
807 	if (disable_mclk_switching) {
808 		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
809 			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
810 	} else {
811 		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
812 			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
813 	}
814 }
815 
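/*
 * Program the thermal interrupt thresholds.  min_temp/max_temp are in
 * millidegrees C and are clamped to the 0-255 C range that the
 * DIG_THERM_INTH/INTL fields can encode, hence the '/ 1000' below.
 */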
816 static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
817 					    int min_temp, int max_temp)
818 {
819 	int low_temp = 0 * 1000;
820 	int high_temp = 255 * 1000;
821 	u32 tmp;
822 
823 	if (low_temp < min_temp)
824 		low_temp = min_temp;
825 	if (high_temp > max_temp)
826 		high_temp = max_temp;
827 	if (high_temp < low_temp) {
828 		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
829 		return -EINVAL;
830 	}
831 
832 	tmp = RREG32_SMC(CG_THERMAL_INT);
833 	tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
834 	tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
835 		CI_DIG_THERM_INTL(low_temp / 1000);
836 	WREG32_SMC(CG_THERMAL_INT, tmp);
837 
838 #if 0
839 	/* XXX: need to figure out how to handle this properly */
840 	tmp = RREG32_SMC(CG_THERMAL_CTRL);
841 	tmp &= DIG_THERM_DPM_MASK;
842 	tmp |= DIG_THERM_DPM(high_temp / 1000);
843 	WREG32_SMC(CG_THERMAL_CTRL, tmp);
844 #endif
845 
846 	rdev->pm.dpm.thermal.min_temp = low_temp;
847 	rdev->pm.dpm.thermal.max_temp = high_temp;
848 
849 	return 0;
850 }
851 
852 #if 0
853 static int ci_read_smc_soft_register(struct radeon_device *rdev,
854 				     u16 reg_offset, u32 *value)
855 {
856 	struct ci_power_info *pi = ci_get_pi(rdev);
857 
858 	return ci_read_smc_sram_dword(rdev,
859 				      pi->soft_regs_start + reg_offset,
860 				      value, pi->sram_end);
861 }
862 #endif
863 
864 static int ci_write_smc_soft_register(struct radeon_device *rdev,
865 				      u16 reg_offset, u32 value)
866 {
867 	struct ci_power_info *pi = ci_get_pi(rdev);
868 
869 	return ci_write_smc_sram_dword(rdev,
870 				       pi->soft_regs_start + reg_offset,
871 				       value, pi->sram_end);
872 }
873 
874 static void ci_init_fps_limits(struct radeon_device *rdev)
875 {
876 	struct ci_power_info *pi = ci_get_pi(rdev);
877 	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
878 
879 	if (pi->caps_fps) {
880 		u16 tmp;
881 
882 		tmp = 45;
883 		table->FpsHighT = cpu_to_be16(tmp);
884 
885 		tmp = 30;
886 		table->FpsLowT = cpu_to_be16(tmp);
887 	}
888 }
889 
890 static int ci_update_sclk_t(struct radeon_device *rdev)
891 {
892 	struct ci_power_info *pi = ci_get_pi(rdev);
893 	int ret = 0;
894 	u32 low_sclk_interrupt_t = 0;
895 
896 	if (pi->caps_sclk_throttle_low_notification) {
897 		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
898 
899 		ret = ci_copy_bytes_to_smc(rdev,
900 					   pi->dpm_table_start +
901 					   offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
902 					   (u8 *)&low_sclk_interrupt_t,
903 					   sizeof(u32), pi->sram_end);
904 
905 	}
906 
907 	return ret;
908 }
909 
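/*
 * Build the VDDC/VDDCI leakage lookup tables.  Table entries whose
 * voltage equals one of the ATOM_VIRTUAL_VOLTAGE_IDx placeholders are
 * later patched by the leakage helpers further down this file to the
 * real, per-part measured voltages gathered here.
 */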
910 static void ci_get_leakage_voltages(struct radeon_device *rdev)
911 {
912 	struct ci_power_info *pi = ci_get_pi(rdev);
913 	u16 leakage_id, virtual_voltage_id;
914 	u16 vddc, vddci;
915 	int i;
916 
917 	pi->vddc_leakage.count = 0;
918 	pi->vddci_leakage.count = 0;
919 
920 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
921 		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
922 			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
923 			if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
924 				continue;
925 			if (vddc != 0 && vddc != virtual_voltage_id) {
926 				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
927 				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
928 				pi->vddc_leakage.count++;
929 			}
930 		}
931 	} else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
932 		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
933 			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
934 			if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
935 										 virtual_voltage_id,
936 										 leakage_id) == 0) {
937 				if (vddc != 0 && vddc != virtual_voltage_id) {
938 					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
939 					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
940 					pi->vddc_leakage.count++;
941 				}
942 				if (vddci != 0 && vddci != virtual_voltage_id) {
943 					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
944 					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
945 					pi->vddci_leakage.count++;
946 				}
947 			}
948 		}
949 	}
950 }
951 
952 static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
953 {
954 	struct ci_power_info *pi = ci_get_pi(rdev);
955 	bool want_thermal_protection;
956 	enum radeon_dpm_event_src dpm_event_src;
957 	u32 tmp;
958 
959 	switch (sources) {
960 	case 0:
961 	default:
962 		want_thermal_protection = false;
963 		break;
964 	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
965 		want_thermal_protection = true;
966 		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
967 		break;
968 	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
969 		want_thermal_protection = true;
970 		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
971 		break;
972 	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
973 	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
974 		want_thermal_protection = true;
975 		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
976 		break;
977 	}
978 
979 	if (want_thermal_protection) {
980 #if 0
981 		/* XXX: need to figure out how to handle this properly */
982 		tmp = RREG32_SMC(CG_THERMAL_CTRL);
983 		tmp &= DPM_EVENT_SRC_MASK;
984 		tmp |= DPM_EVENT_SRC(dpm_event_src);
985 		WREG32_SMC(CG_THERMAL_CTRL, tmp);
986 #endif
987 
988 		tmp = RREG32_SMC(GENERAL_PWRMGT);
989 		if (pi->thermal_protection)
990 			tmp &= ~THERMAL_PROTECTION_DIS;
991 		else
992 			tmp |= THERMAL_PROTECTION_DIS;
993 		WREG32_SMC(GENERAL_PWRMGT, tmp);
994 	} else {
995 		tmp = RREG32_SMC(GENERAL_PWRMGT);
996 		tmp |= THERMAL_PROTECTION_DIS;
997 		WREG32_SMC(GENERAL_PWRMGT, tmp);
998 	}
999 }
1000 
1001 static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
1002 					   enum radeon_dpm_auto_throttle_src source,
1003 					   bool enable)
1004 {
1005 	struct ci_power_info *pi = ci_get_pi(rdev);
1006 
1007 	if (enable) {
1008 		if (!(pi->active_auto_throttle_sources & (1 << source))) {
1009 			pi->active_auto_throttle_sources |= 1 << source;
1010 			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1011 		}
1012 	} else {
1013 		if (pi->active_auto_throttle_sources & (1 << source)) {
1014 			pi->active_auto_throttle_sources &= ~(1 << source);
1015 			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1016 		}
1017 	}
1018 }
1019 
1020 static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
1021 {
1022 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1023 		ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
1024 }
1025 
1026 static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
1027 {
1028 	struct ci_power_info *pi = ci_get_pi(rdev);
1029 	PPSMC_Result smc_result;
1030 
1031 	if (!pi->need_update_smu7_dpm_table)
1032 		return 0;
1033 
1034 	if ((!pi->sclk_dpm_key_disabled) &&
1035 	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1036 		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
1037 		if (smc_result != PPSMC_Result_OK)
1038 			return -EINVAL;
1039 	}
1040 
1041 	if ((!pi->mclk_dpm_key_disabled) &&
1042 	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1043 		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
1044 		if (smc_result != PPSMC_Result_OK)
1045 			return -EINVAL;
1046 	}
1047 
1048 	pi->need_update_smu7_dpm_table = 0;
1049 	return 0;
1050 }
1051 
1052 static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
1053 {
1054 	struct ci_power_info *pi = ci_get_pi(rdev);
1055 	PPSMC_Result smc_result;
1056 
1057 	if (enable) {
1058 		if (!pi->sclk_dpm_key_disabled) {
1059 			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
1060 			if (smc_result != PPSMC_Result_OK)
1061 				return -EINVAL;
1062 		}
1063 
1064 		if (!pi->mclk_dpm_key_disabled) {
1065 			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
1066 			if (smc_result != PPSMC_Result_OK)
1067 				return -EINVAL;
1068 
1069 			WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);
1070 
1071 			WREG32_SMC(LCAC_MC0_CNTL, 0x05);
1072 			WREG32_SMC(LCAC_MC1_CNTL, 0x05);
1073 			WREG32_SMC(LCAC_CPL_CNTL, 0x100005);
1074 
1075 			udelay(10);
1076 
1077 			WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
1078 			WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
1079 			WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
1080 		}
1081 	} else {
1082 		if (!pi->sclk_dpm_key_disabled) {
1083 			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
1084 			if (smc_result != PPSMC_Result_OK)
1085 				return -EINVAL;
1086 		}
1087 
1088 		if (!pi->mclk_dpm_key_disabled) {
1089 			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
1090 			if (smc_result != PPSMC_Result_OK)
1091 				return -EINVAL;
1092 		}
1093 	}
1094 
1095 	return 0;
1096 }
1097 
1098 static int ci_start_dpm(struct radeon_device *rdev)
1099 {
1100 	struct ci_power_info *pi = ci_get_pi(rdev);
1101 	PPSMC_Result smc_result;
1102 	int ret;
1103 	u32 tmp;
1104 
1105 	tmp = RREG32_SMC(GENERAL_PWRMGT);
1106 	tmp |= GLOBAL_PWRMGT_EN;
1107 	WREG32_SMC(GENERAL_PWRMGT, tmp);
1108 
1109 	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1110 	tmp |= DYNAMIC_PM_EN;
1111 	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1112 
1113 	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
1114 
1115 	WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);
1116 
1117 	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
1118 	if (smc_result != PPSMC_Result_OK)
1119 		return -EINVAL;
1120 
1121 	ret = ci_enable_sclk_mclk_dpm(rdev, true);
1122 	if (ret)
1123 		return ret;
1124 
1125 	if (!pi->pcie_dpm_key_disabled) {
1126 		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
1127 		if (smc_result != PPSMC_Result_OK)
1128 			return -EINVAL;
1129 	}
1130 
1131 	return 0;
1132 }
1133 
1134 static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
1135 {
1136 	struct ci_power_info *pi = ci_get_pi(rdev);
1137 	PPSMC_Result smc_result;
1138 
1139 	if (!pi->need_update_smu7_dpm_table)
1140 		return 0;
1141 
1142 	if ((!pi->sclk_dpm_key_disabled) &&
1143 	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1144 		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1145 		if (smc_result != PPSMC_Result_OK)
1146 			return -EINVAL;
1147 	}
1148 
1149 	if ((!pi->mclk_dpm_key_disabled) &&
1150 	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1151 		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1152 		if (smc_result != PPSMC_Result_OK)
1153 			return -EINVAL;
1154 	}
1155 
1156 	return 0;
1157 }
1158 
1159 static int ci_stop_dpm(struct radeon_device *rdev)
1160 {
1161 	struct ci_power_info *pi = ci_get_pi(rdev);
1162 	PPSMC_Result smc_result;
1163 	int ret;
1164 	u32 tmp;
1165 
1166 	tmp = RREG32_SMC(GENERAL_PWRMGT);
1167 	tmp &= ~GLOBAL_PWRMGT_EN;
1168 	WREG32_SMC(GENERAL_PWRMGT, tmp);
1169 
1170 	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1171 	tmp &= ~DYNAMIC_PM_EN;
1172 	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1173 
1174 	if (!pi->pcie_dpm_key_disabled) {
1175 		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
1176 		if (smc_result != PPSMC_Result_OK)
1177 			return -EINVAL;
1178 	}
1179 
1180 	ret = ci_enable_sclk_mclk_dpm(rdev, false);
1181 	if (ret)
1182 		return ret;
1183 
1184 	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
1185 	if (smc_result != PPSMC_Result_OK)
1186 		return -EINVAL;
1187 
1188 	return 0;
1189 }
1190 
1191 static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
1192 {
1193 	u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1194 
1195 	if (enable)
1196 		tmp &= ~SCLK_PWRMGT_OFF;
1197 	else
1198 		tmp |= SCLK_PWRMGT_OFF;
1199 	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1200 }
1201 
1202 #if 0
1203 static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
1204 					bool ac_power)
1205 {
1206 	struct ci_power_info *pi = ci_get_pi(rdev);
1207 	struct radeon_cac_tdp_table *cac_tdp_table =
1208 		rdev->pm.dpm.dyn_state.cac_tdp_table;
1209 	u32 power_limit;
1210 
1211 	if (ac_power)
1212 		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
1213 	else
1214 		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
1215 
1216 	ci_set_power_limit(rdev, power_limit);
1217 
1218 	if (pi->caps_automatic_dc_transition) {
1219 		if (ac_power)
1220 			ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
1221 		else
1222 			ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
1223 	}
1224 
1225 	return 0;
1226 }
1227 #endif
1228 
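/*
 * The SMC message interface is a simple mailbox: an optional argument
 * is placed in SMC_MSG_ARG_0, the message ID is posted with
 * ci_send_msg_to_smc(), and for query messages the SMC leaves its
 * reply in SMC_MSG_ARG_0 to be read back on success.
 */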
1229 static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
1230 						      PPSMC_Msg msg, u32 parameter)
1231 {
1232 	WREG32(SMC_MSG_ARG_0, parameter);
1233 	return ci_send_msg_to_smc(rdev, msg);
1234 }
1235 
1236 static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
1237 							PPSMC_Msg msg, u32 *parameter)
1238 {
1239 	PPSMC_Result smc_result;
1240 
1241 	smc_result = ci_send_msg_to_smc(rdev, msg);
1242 
1243 	if ((smc_result == PPSMC_Result_OK) && parameter)
1244 		*parameter = RREG32(SMC_MSG_ARG_0);
1245 
1246 	return smc_result;
1247 }
1248 
1249 static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
1250 {
1251 	struct ci_power_info *pi = ci_get_pi(rdev);
1252 
1253 	if (!pi->sclk_dpm_key_disabled) {
1254 		PPSMC_Result smc_result =
1255 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n);
1256 		if (smc_result != PPSMC_Result_OK)
1257 			return -EINVAL;
1258 	}
1259 
1260 	return 0;
1261 }
1262 
1263 static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
1264 {
1265 	struct ci_power_info *pi = ci_get_pi(rdev);
1266 
1267 	if (!pi->mclk_dpm_key_disabled) {
1268 		PPSMC_Result smc_result =
1269 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n);
1270 		if (smc_result != PPSMC_Result_OK)
1271 			return -EINVAL;
1272 	}
1273 
1274 	return 0;
1275 }
1276 
1277 static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
1278 {
1279 	struct ci_power_info *pi = ci_get_pi(rdev);
1280 
1281 	if (!pi->pcie_dpm_key_disabled) {
1282 		PPSMC_Result smc_result =
1283 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1284 		if (smc_result != PPSMC_Result_OK)
1285 			return -EINVAL;
1286 	}
1287 
1288 	return 0;
1289 }
1290 
1291 static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
1292 {
1293 	struct ci_power_info *pi = ci_get_pi(rdev);
1294 
1295 	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1296 		PPSMC_Result smc_result =
1297 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
1298 		if (smc_result != PPSMC_Result_OK)
1299 			return -EINVAL;
1300 	}
1301 
1302 	return 0;
1303 }
1304 
1305 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
1306 				       u32 target_tdp)
1307 {
1308 	PPSMC_Result smc_result =
1309 		ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1310 	if (smc_result != PPSMC_Result_OK)
1311 		return -EINVAL;
1312 	return 0;
1313 }
1314 
1315 static int ci_set_boot_state(struct radeon_device *rdev)
1316 {
1317 	return ci_enable_sclk_mclk_dpm(rdev, false);
1318 }
1319 
1320 static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
1321 {
1322 	u32 sclk_freq;
1323 	PPSMC_Result smc_result =
1324 		ci_send_msg_to_smc_return_parameter(rdev,
1325 						    PPSMC_MSG_API_GetSclkFrequency,
1326 						    &sclk_freq);
1327 	if (smc_result != PPSMC_Result_OK)
1328 		sclk_freq = 0;
1329 
1330 	return sclk_freq;
1331 }
1332 
1333 static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
1334 {
1335 	u32 mclk_freq;
1336 	PPSMC_Result smc_result =
1337 		ci_send_msg_to_smc_return_parameter(rdev,
1338 						    PPSMC_MSG_API_GetMclkFrequency,
1339 						    &mclk_freq);
1340 	if (smc_result != PPSMC_Result_OK)
1341 		mclk_freq = 0;
1342 
1343 	return mclk_freq;
1344 }
1345 
1346 static void ci_dpm_start_smc(struct radeon_device *rdev)
1347 {
1348 	int i;
1349 
1350 	ci_program_jump_on_start(rdev);
1351 	ci_start_smc_clock(rdev);
1352 	ci_start_smc(rdev);
1353 	for (i = 0; i < rdev->usec_timeout; i++) {
1354 		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
1355 			break;
1356 	}
1357 }
1358 
1359 static void ci_dpm_stop_smc(struct radeon_device *rdev)
1360 {
1361 	ci_reset_smc(rdev);
1362 	ci_stop_smc_clock(rdev);
1363 }
1364 
1365 static int ci_process_firmware_header(struct radeon_device *rdev)
1366 {
1367 	struct ci_power_info *pi = ci_get_pi(rdev);
1368 	u32 tmp;
1369 	int ret;
1370 
1371 	ret = ci_read_smc_sram_dword(rdev,
1372 				     SMU7_FIRMWARE_HEADER_LOCATION +
1373 				     offsetof(SMU7_Firmware_Header, DpmTable),
1374 				     &tmp, pi->sram_end);
1375 	if (ret)
1376 		return ret;
1377 
1378 	pi->dpm_table_start = tmp;
1379 
1380 	ret = ci_read_smc_sram_dword(rdev,
1381 				     SMU7_FIRMWARE_HEADER_LOCATION +
1382 				     offsetof(SMU7_Firmware_Header, SoftRegisters),
1383 				     &tmp, pi->sram_end);
1384 	if (ret)
1385 		return ret;
1386 
1387 	pi->soft_regs_start = tmp;
1388 
1389 	ret = ci_read_smc_sram_dword(rdev,
1390 				     SMU7_FIRMWARE_HEADER_LOCATION +
1391 				     offsetof(SMU7_Firmware_Header, mcRegisterTable),
1392 				     &tmp, pi->sram_end);
1393 	if (ret)
1394 		return ret;
1395 
1396 	pi->mc_reg_table_start = tmp;
1397 
1398 	ret = ci_read_smc_sram_dword(rdev,
1399 				     SMU7_FIRMWARE_HEADER_LOCATION +
1400 				     offsetof(SMU7_Firmware_Header, FanTable),
1401 				     &tmp, pi->sram_end);
1402 	if (ret)
1403 		return ret;
1404 
1405 	pi->fan_table_start = tmp;
1406 
1407 	ret = ci_read_smc_sram_dword(rdev,
1408 				     SMU7_FIRMWARE_HEADER_LOCATION +
1409 				     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1410 				     &tmp, pi->sram_end);
1411 	if (ret)
1412 		return ret;
1413 
1414 	pi->arb_table_start = tmp;
1415 
1416 	return 0;
1417 }
1418 
1419 static void ci_read_clock_registers(struct radeon_device *rdev)
1420 {
1421 	struct ci_power_info *pi = ci_get_pi(rdev);
1422 
1423 	pi->clock_registers.cg_spll_func_cntl =
1424 		RREG32_SMC(CG_SPLL_FUNC_CNTL);
1425 	pi->clock_registers.cg_spll_func_cntl_2 =
1426 		RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
1427 	pi->clock_registers.cg_spll_func_cntl_3 =
1428 		RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
1429 	pi->clock_registers.cg_spll_func_cntl_4 =
1430 		RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
1431 	pi->clock_registers.cg_spll_spread_spectrum =
1432 		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
1433 	pi->clock_registers.cg_spll_spread_spectrum_2 =
1434 		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
1435 	pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
1436 	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
1437 	pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
1438 	pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
1439 	pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
1440 	pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
1441 	pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
1442 	pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
1443 	pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
1444 }
1445 
1446 static void ci_init_sclk_t(struct radeon_device *rdev)
1447 {
1448 	struct ci_power_info *pi = ci_get_pi(rdev);
1449 
1450 	pi->low_sclk_interrupt_t = 0;
1451 }
1452 
1453 static void ci_enable_thermal_protection(struct radeon_device *rdev,
1454 					 bool enable)
1455 {
1456 	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1457 
1458 	if (enable)
1459 		tmp &= ~THERMAL_PROTECTION_DIS;
1460 	else
1461 		tmp |= THERMAL_PROTECTION_DIS;
1462 	WREG32_SMC(GENERAL_PWRMGT, tmp);
1463 }
1464 
1465 static void ci_enable_acpi_power_management(struct radeon_device *rdev)
1466 {
1467 	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1468 
1469 	tmp |= STATIC_PM_EN;
1470 
1471 	WREG32_SMC(GENERAL_PWRMGT, tmp);
1472 }
1473 
1474 #if 0
1475 static int ci_enter_ulp_state(struct radeon_device *rdev)
1476 {
1477 
1478 	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
1479 
1480 	udelay(25000);
1481 
1482 	return 0;
1483 }
1484 
1485 static int ci_exit_ulp_state(struct radeon_device *rdev)
1486 {
1487 	int i;
1488 
1489 	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
1490 
1491 	udelay(7000);
1492 
1493 	for (i = 0; i < rdev->usec_timeout; i++) {
1494 		if (RREG32(SMC_RESP_0) == 1)
1495 			break;
1496 		udelay(1000);
1497 	}
1498 
1499 	return 0;
1500 }
1501 #endif
1502 
1503 static int ci_notify_smc_display_change(struct radeon_device *rdev,
1504 					bool has_display)
1505 {
1506 	PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
1507 
1508 	return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ?  0 : -EINVAL;
1509 	return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
1510 
1511 static int ci_enable_ds_master_switch(struct radeon_device *rdev,
1512 				      bool enable)
1513 {
1514 	struct ci_power_info *pi = ci_get_pi(rdev);
1515 
1516 	if (enable) {
1517 		if (pi->caps_sclk_ds) {
1518 			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
1519 				return -EINVAL;
1520 		} else {
1521 			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1522 				return -EINVAL;
1523 		}
1524 	} else {
1525 		if (pi->caps_sclk_ds) {
1526 			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1527 				return -EINVAL;
1528 		}
1529 	}
1530 
1531 	return 0;
1532 }
1533 
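/*
 * Program the display gap heuristics the SMC uses to schedule
 * reclocking around vblank.  Worked example at 60 Hz with a 300 us
 * vblank: frame_time = 1000000 / 60 = 16666 us and pre_vbi_time =
 * 16666 - 200 - 300 = 16166 us; with a 27 MHz reference clock
 * (reference_freq = 2700, in 10 kHz units) that becomes
 * 16166 * 27 reference-clock ticks.
 */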
1534 static void ci_program_display_gap(struct radeon_device *rdev)
1535 {
1536 	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
1537 	u32 pre_vbi_time_in_us;
1538 	u32 frame_time_in_us;
1539 	u32 ref_clock = rdev->clock.spll.reference_freq;
1540 	u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
1541 	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
1542 
1543 	tmp &= ~DISP_GAP_MASK;
1544 	if (rdev->pm.dpm.new_active_crtc_count > 0)
1545 		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
1546 	else
1547 		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
1548 	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
1549 
1550 	if (refresh_rate == 0)
1551 		refresh_rate = 60;
1552 	if (vblank_time == 0xffffffff)
1553 		vblank_time = 500;
1554 	frame_time_in_us = 1000000 / refresh_rate;
1555 	pre_vbi_time_in_us =
1556 		frame_time_in_us - 200 - vblank_time;
1557 	tmp = pre_vbi_time_in_us * (ref_clock / 100);
1558 
1559 	WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
1560 	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
1561 	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
1562 
1563 	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
1564 }
1567 
1568 static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
1569 {
1570 	struct ci_power_info *pi = ci_get_pi(rdev);
1571 	u32 tmp;
1572 
1573 	if (enable) {
1574 		if (pi->caps_sclk_ss_support) {
1575 			tmp = RREG32_SMC(GENERAL_PWRMGT);
1576 			tmp |= DYN_SPREAD_SPECTRUM_EN;
1577 			WREG32_SMC(GENERAL_PWRMGT, tmp);
1578 		}
1579 	} else {
1580 		tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
1581 		tmp &= ~SSEN;
1582 		WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);
1583 
1584 		tmp = RREG32_SMC(GENERAL_PWRMGT);
1585 		tmp &= ~DYN_SPREAD_SPECTRUM_EN;
1586 		WREG32_SMC(GENERAL_PWRMGT, tmp);
1587 	}
1588 }
1589 
1590 static void ci_program_sstp(struct radeon_device *rdev)
1591 {
1592 	WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
1593 }
1594 
1595 static void ci_enable_display_gap(struct radeon_device *rdev)
1596 {
1597 	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
1598 
1599 	tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
1600 	tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
1601 		DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
1602 
1603 	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
1604 }
1605 
1606 static void ci_program_vc(struct radeon_device *rdev)
1607 {
1608 	u32 tmp;
1609 
1610 	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1611 	tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
1612 	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1613 
1614 	WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
1615 	WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
1616 	WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
1617 	WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
1618 	WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
1619 	WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
1620 	WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
1621 	WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
1622 }
1623 
1624 static void ci_clear_vc(struct radeon_device *rdev)
1625 {
1626 	u32 tmp;
1627 
1628 	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1629 	tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
1630 	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1631 
1632 	WREG32_SMC(CG_FTV_0, 0);
1633 	WREG32_SMC(CG_FTV_1, 0);
1634 	WREG32_SMC(CG_FTV_2, 0);
1635 	WREG32_SMC(CG_FTV_3, 0);
1636 	WREG32_SMC(CG_FTV_4, 0);
1637 	WREG32_SMC(CG_FTV_5, 0);
1638 	WREG32_SMC(CG_FTV_6, 0);
1639 	WREG32_SMC(CG_FTV_7, 0);
1640 }
1641 
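/*
 * (Re)load the SMC microcode: wait for the boot ROM sequence to
 * complete, then stop the SMC clock, hold the SMC in reset and copy
 * the ucode image into SMC SRAM.
 */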
1642 static int ci_upload_firmware(struct radeon_device *rdev)
1643 {
1644 	struct ci_power_info *pi = ci_get_pi(rdev);
1645 	int i, ret;
1646 
1647 	for (i = 0; i < rdev->usec_timeout; i++) {
1648 		if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
1649 			break;
1650 	}
1651 	WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);
1652 
1653 	ci_stop_smc_clock(rdev);
1654 	ci_reset_smc(rdev);
1655 
1656 	ret = ci_load_smc_ucode(rdev, pi->sram_end);
1657 
1658 	return ret;
1659 
1660 }
1661 
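/*
 * SVI2-controlled rails have no GPIO/SMIO lookup table in the VBIOS,
 * so the voltage table is built directly from the clock/voltage
 * dependency table; the SMIO mask and phase delay stay zero.
 */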
1662 static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
1663 				     struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
1664 				     struct atom_voltage_table *voltage_table)
1665 {
1666 	u32 i;
1667 
1668 	if (voltage_dependency_table == NULL)
1669 		return -EINVAL;
1670 
1671 	voltage_table->mask_low = 0;
1672 	voltage_table->phase_delay = 0;
1673 
1674 	voltage_table->count = voltage_dependency_table->count;
1675 	for (i = 0; i < voltage_table->count; i++) {
1676 		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
1677 		voltage_table->entries[i].smio_low = 0;
1678 	}
1679 
1680 	return 0;
1681 }
1682 
1683 static int ci_construct_voltage_tables(struct radeon_device *rdev)
1684 {
1685 	struct ci_power_info *pi = ci_get_pi(rdev);
1686 	int ret;
1687 
1688 	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1689 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
1690 						    VOLTAGE_OBJ_GPIO_LUT,
1691 						    &pi->vddc_voltage_table);
1692 		if (ret)
1693 			return ret;
1694 	} else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1695 		ret = ci_get_svi2_voltage_table(rdev,
1696 						&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
1697 						&pi->vddc_voltage_table);
1698 		if (ret)
1699 			return ret;
1700 	}
1701 
1702 	if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
1703 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
1704 							 &pi->vddc_voltage_table);
1705 
1706 	if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1707 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
1708 						    VOLTAGE_OBJ_GPIO_LUT,
1709 						    &pi->vddci_voltage_table);
1710 		if (ret)
1711 			return ret;
1712 	} else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1713 		ret = ci_get_svi2_voltage_table(rdev,
1714 						&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
1715 						&pi->vddci_voltage_table);
1716 		if (ret)
1717 			return ret;
1718 	}
1719 
1720 	if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
1721 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
1722 							 &pi->vddci_voltage_table);
1723 
1724 	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
1725 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
1726 						    VOLTAGE_OBJ_GPIO_LUT,
1727 						    &pi->mvdd_voltage_table);
1728 		if (ret)
1729 			return ret;
1730 	} else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
1731 		ret = ci_get_svi2_voltage_table(rdev,
1732 						&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
1733 						&pi->mvdd_voltage_table);
1734 		if (ret)
1735 			return ret;
1736 	}
1737 
1738 	if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
1739 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
1740 							 &pi->mvdd_voltage_table);
1741 
1742 	return 0;
1743 }
1744 
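/*
 * Convert one voltage table entry into the SMU7 format. If no standard
 * (SIDD) leakage data can be derived, both the high and low standard
 * voltages fall back to the nominal value. The SMC expects big-endian
 * data, hence the byte swaps on every multi-byte field.
 */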
1745 static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
1746 					  struct atom_voltage_table_entry *voltage_table,
1747 					  SMU7_Discrete_VoltageLevel *smc_voltage_table)
1748 {
1749 	int ret;
1750 
1751 	ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
1752 					    &smc_voltage_table->StdVoltageHiSidd,
1753 					    &smc_voltage_table->StdVoltageLoSidd);
1754 
1755 	if (ret) {
1756 		smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
1757 		smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
1758 	}
1759 
1760 	smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
1761 	smc_voltage_table->StdVoltageHiSidd =
1762 		cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
1763 	smc_voltage_table->StdVoltageLoSidd =
1764 		cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
1765 }
1766 
1767 static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
1768 				      SMU7_Discrete_DpmTable *table)
1769 {
1770 	struct ci_power_info *pi = ci_get_pi(rdev);
1771 	unsigned int count;
1772 
1773 	table->VddcLevelCount = pi->vddc_voltage_table.count;
1774 	for (count = 0; count < table->VddcLevelCount; count++) {
1775 		ci_populate_smc_voltage_table(rdev,
1776 					      &pi->vddc_voltage_table.entries[count],
1777 					      &table->VddcLevel[count]);
1778 
1779 		if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1780 			table->VddcLevel[count].Smio |=
1781 				pi->vddc_voltage_table.entries[count].smio_low;
1782 		else
1783 			table->VddcLevel[count].Smio = 0;
1784 	}
1785 	table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
1786 
1787 	return 0;
1788 }
1789 
1790 static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
1791 				       SMU7_Discrete_DpmTable *table)
1792 {
1793 	unsigned int count;
1794 	struct ci_power_info *pi = ci_get_pi(rdev);
1795 
1796 	table->VddciLevelCount = pi->vddci_voltage_table.count;
1797 	for (count = 0; count < table->VddciLevelCount; count++) {
1798 		ci_populate_smc_voltage_table(rdev,
1799 					      &pi->vddci_voltage_table.entries[count],
1800 					      &table->VddciLevel[count]);
1801 
1802 		if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1803 			table->VddciLevel[count].Smio |=
1804 				pi->vddci_voltage_table.entries[count].smio_low;
1805 		else
1806 			table->VddciLevel[count].Smio = 0;
1807 	}
1808 	table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
1809 
1810 	return 0;
1811 }
1812 
1813 static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
1814 				      SMU7_Discrete_DpmTable *table)
1815 {
1816 	struct ci_power_info *pi = ci_get_pi(rdev);
1817 	unsigned int count;
1818 
1819 	table->MvddLevelCount = pi->mvdd_voltage_table.count;
1820 	for (count = 0; count < table->MvddLevelCount; count++) {
1821 		ci_populate_smc_voltage_table(rdev,
1822 					      &pi->mvdd_voltage_table.entries[count],
1823 					      &table->MvddLevel[count]);
1824 
1825 		if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1826 			table->MvddLevel[count].Smio |=
1827 				pi->mvdd_voltage_table.entries[count].smio_low;
1828 		else
1829 			table->MvddLevel[count].Smio = 0;
1830 	}
1831 	table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
1832 
1833 	return 0;
1834 }
1835 
1836 static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
1837 					  SMU7_Discrete_DpmTable *table)
1838 {
1839 	int ret;
1840 
1841 	ret = ci_populate_smc_vddc_table(rdev, table);
1842 	if (ret)
1843 		return ret;
1844 
1845 	ret = ci_populate_smc_vddci_table(rdev, table);
1846 	if (ret)
1847 		return ret;
1848 
1849 	ret = ci_populate_smc_mvdd_table(rdev, table);
1850 	if (ret)
1851 		return ret;
1852 
1853 	return 0;
1854 }
1855 
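/*
 * Look up the MVDD level for a given memory clock. Returns 0 with
 * *voltage filled in on success, or -EINVAL when MVDD control is
 * disabled or no dependency entry covers the clock, in which case the
 * caller falls back to a zero/default MVDD.
 */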
1856 static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
1857 				  SMU7_Discrete_VoltageLevel *voltage)
1858 {
1859 	struct ci_power_info *pi = ci_get_pi(rdev);
1860 	u32 i;
1861 
1862 	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_NONE)
1863 		return -EINVAL;
1864 
1865 	for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
1866 		if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
1867 			voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
1868 			return 0;
1869 		}
1870 	}
1871 
1872 	return -EINVAL;
1875 }
1876 
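/*
 * Derive the standard high/low SIDD voltages from the CAC leakage
 * table: first look for an exact voltage match in the vddc/sclk
 * dependency table, then fall back to the closest entry at or above the
 * requested voltage. The "lo" value comes from the leakage table's vddc
 * field and the "hi" value from its leakage field; if the leakage table
 * is shorter than the dependency table, the last entry is reused.
 */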
1877 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
1878 					 struct atom_voltage_table_entry *voltage_table,
1879 					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
1880 {
1881 	u16 v_index, idx;
1882 	bool voltage_found = false;
1883 	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
1884 	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
1885 
1886 	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
1887 		return -EINVAL;
1888 
1889 	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
1890 		for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1891 			if (voltage_table->value ==
1892 			    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1893 				voltage_found = true;
1894 				if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1895 					idx = v_index;
1896 				else
1897 					idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1898 				*std_voltage_lo_sidd =
1899 					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1900 				*std_voltage_hi_sidd =
1901 					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1902 				break;
1903 			}
1904 		}
1905 
1906 		if (!voltage_found) {
1907 			for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1908 				if (voltage_table->value <=
1909 				    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1910 					voltage_found = true;
1911 					if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1912 						idx = v_index;
1913 					else
1914 						idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1915 					*std_voltage_lo_sidd =
1916 						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1917 					*std_voltage_hi_sidd =
1918 						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1919 					break;
1920 				}
1921 			}
1922 		}
1923 	}
1924 
1925 	return 0;
1926 }
1927 
1928 static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
1929 						  const struct radeon_phase_shedding_limits_table *limits,
1930 						  u32 sclk,
1931 						  u32 *phase_shedding)
1932 {
1933 	unsigned int i;
1934 
1935 	*phase_shedding = 1;
1936 
1937 	for (i = 0; i < limits->count; i++) {
1938 		if (sclk < limits->entries[i].sclk) {
1939 			*phase_shedding = i;
1940 			break;
1941 		}
1942 	}
1943 }
1944 
1945 static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
1946 						  const struct radeon_phase_shedding_limits_table *limits,
1947 						  u32 mclk,
1948 						  u32 *phase_shedding)
1949 {
1950 	unsigned int i;
1951 
1952 	*phase_shedding = 1;
1953 
1954 	for (i = 0; i < limits->count; i++) {
1955 		if (mclk < limits->entries[i].mclk) {
1956 			*phase_shedding = i;
1957 			break;
1958 		}
1959 	}
1960 }
1961 
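/*
 * Initialize the MC arbitration table index to register set F1. Only
 * the top byte of the dword at arb_table_start carries the index; the
 * low 24 bits are preserved.
 */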
1962 static int ci_init_arb_table_index(struct radeon_device *rdev)
1963 {
1964 	struct ci_power_info *pi = ci_get_pi(rdev);
1965 	u32 tmp;
1966 	int ret;
1967 
1968 	ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
1969 				     &tmp, pi->sram_end);
1970 	if (ret)
1971 		return ret;
1972 
1973 	tmp &= 0x00FFFFFF;
1974 	tmp |= MC_CG_ARB_FREQ_F1 << 24;
1975 
1976 	return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
1977 				       tmp, pi->sram_end);
1978 }
1979 
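/*
 * Voltage lookup for a target clock: the first dependency entry at or
 * above the clock wins; if the clock exceeds every entry, the highest
 * (last) entry's voltage is used.
 */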
1980 static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
1981 					 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
1982 					 u32 clock, u32 *voltage)
1983 {
1984 	u32 i = 0;
1985 
1986 	if (allowed_clock_voltage_table->count == 0)
1987 		return -EINVAL;
1988 
1989 	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
1990 		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
1991 			*voltage = allowed_clock_voltage_table->entries[i].v;
1992 			return 0;
1993 		}
1994 	}
1995 
1996 	*voltage = allowed_clock_voltage_table->entries[i-1].v;
1997 
1998 	return 0;
1999 }
2000 
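/*
 * Pick the deepest sleep divider id such that sclk / (1 << id) stays at
 * or above the minimum engine clock. For example (illustrative values
 * only), with a floor of min and sclk = 3 * min, id 1 (sclk / 2)
 * qualifies but id 2 (sclk / 4) does not, so 1 is returned.
 */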
2001 static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
2002 					     u32 sclk, u32 min_sclk_in_sr)
2003 {
2004 	u32 i;
2005 	u32 tmp;
2006 	u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
2007 		min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
2008 
2009 	if (sclk < min)
2010 		return 0;
2011 
2012 	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
2013 		tmp = sclk / (1 << i);
2014 		if (tmp >= min || i == 0)
2015 			break;
2016 	}
2017 
2018 	return (u8)i;
2019 }
2020 
2021 static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
2022 {
2023 	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2024 }
2025 
2026 static int ci_reset_to_default(struct radeon_device *rdev)
2027 {
2028 	return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2029 		0 : -EINVAL;
2030 }
2031 
2032 static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
2033 {
2034 	u32 tmp;
2035 
2036 	tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
2037 
2038 	if (tmp == MC_CG_ARB_FREQ_F0)
2039 		return 0;
2040 
2041 	return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
2042 }
2043 
2044 static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
2045 						u32 sclk,
2046 						u32 mclk,
2047 						SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2048 {
2049 	u32 dram_timing;
2050 	u32 dram_timing2;
2051 	u32 burst_time;
2052 
2053 	radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
2054 
2055 	dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
2056 	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
2057 	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
2058 
2059 	arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2060 	arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2061 	arb_regs->McArbBurstTime = (u8)burst_time;
2062 
2063 	return 0;
2064 }
2065 
2066 static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
2067 {
2068 	struct ci_power_info *pi = ci_get_pi(rdev);
2069 	SMU7_Discrete_MCArbDramTimingTable arb_regs;
2070 	u32 i, j;
2071 	int ret = 0;
2072 
2073 	memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2074 
2075 	for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2076 		for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2077 			ret = ci_populate_memory_timing_parameters(rdev,
2078 								   pi->dpm_table.sclk_table.dpm_levels[i].value,
2079 								   pi->dpm_table.mclk_table.dpm_levels[j].value,
2080 								   &arb_regs.entries[i][j]);
2081 			if (ret)
2082 				break;
2083 		}
2084 	}
2085 
2086 	if (ret == 0)
2087 		ret = ci_copy_bytes_to_smc(rdev,
2088 					   pi->arb_table_start,
2089 					   (u8 *)&arb_regs,
2090 					   sizeof(SMU7_Discrete_MCArbDramTimingTable),
2091 					   pi->sram_end);
2092 
2093 	return ret;
2094 }
2095 
2096 static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2097 {
2098 	struct ci_power_info *pi = ci_get_pi(rdev);
2099 
2100 	if (pi->need_update_smu7_dpm_table == 0)
2101 		return 0;
2102 
2103 	return ci_do_program_memory_timing_parameters(rdev);
2104 }
2105 
2106 static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2107 					  struct radeon_ps *radeon_boot_state)
2108 {
2109 	struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2110 	struct ci_power_info *pi = ci_get_pi(rdev);
2111 	u32 level = 0;
2112 
2113 	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2114 		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2115 		    boot_state->performance_levels[0].sclk) {
2116 			pi->smc_state_table.GraphicsBootLevel = level;
2117 			break;
2118 		}
2119 	}
2120 
2121 	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2122 		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2123 		    boot_state->performance_levels[0].mclk) {
2124 			pi->smc_state_table.MemoryBootLevel = level;
2125 			break;
2126 		}
2127 	}
2128 }
2129 
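/*
 * Build a bitmask of the enabled levels in a DPM table, with bit 0
 * corresponding to level 0. A four-level table with levels 0, 1 and 3
 * enabled yields 0b1011 (0xb), for example.
 */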
2130 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2131 {
2132 	u32 i;
2133 	u32 mask_value = 0;
2134 
2135 	for (i = dpm_table->count; i > 0; i--) {
2136 		mask_value <<= 1;
2137 		if (dpm_table->dpm_levels[i-1].enabled)
2138 			mask_value |= 0x1;
2139 	}
2142 
2143 	return mask_value;
2144 }
2145 
2146 static void ci_populate_smc_link_level(struct radeon_device *rdev,
2147 				       SMU7_Discrete_DpmTable *table)
2148 {
2149 	struct ci_power_info *pi = ci_get_pi(rdev);
2150 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
2151 	u32 i;
2152 
2153 	for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2154 		table->LinkLevel[i].PcieGenSpeed =
2155 			(u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2156 		table->LinkLevel[i].PcieLaneCount =
2157 			r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2158 		table->LinkLevel[i].EnabledForActivity = 1;
2159 		table->LinkLevel[i].DownT = cpu_to_be32(5);
2160 		table->LinkLevel[i].UpT = cpu_to_be32(30);
2161 	}
2162 
2163 	pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2164 	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2165 		ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2166 }
2167 
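/*
 * The UVD, VCE, ACP and SAMU level tables below all follow the same
 * pattern: copy the clock/voltage pairs from the vbios dependency
 * tables, convert each clock to a post divider via the atom divider
 * tables, and byte-swap the result for the big-endian SMC.
 */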
2168 static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
2169 				     SMU7_Discrete_DpmTable *table)
2170 {
2171 	u32 count;
2172 	struct atom_clock_dividers dividers;
2173 	int ret = -EINVAL;
2174 
2175 	table->UvdLevelCount =
2176 		rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2177 
2178 	for (count = 0; count < table->UvdLevelCount; count++) {
2179 		table->UvdLevel[count].VclkFrequency =
2180 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2181 		table->UvdLevel[count].DclkFrequency =
2182 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2183 		table->UvdLevel[count].MinVddc =
2184 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2185 		table->UvdLevel[count].MinVddcPhases = 1;
2186 
2187 		ret = radeon_atom_get_clock_dividers(rdev,
2188 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2189 						     table->UvdLevel[count].VclkFrequency, false, &dividers);
2190 		if (ret)
2191 			return ret;
2192 
2193 		table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2194 
2195 		ret = radeon_atom_get_clock_dividers(rdev,
2196 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2197 						     table->UvdLevel[count].DclkFrequency, false, &dividers);
2198 		if (ret)
2199 			return ret;
2200 
2201 		table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2202 
2203 		table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2204 		table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2205 		table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2206 	}
2207 
2208 	return ret;
2209 }
2210 
2211 static int ci_populate_smc_vce_level(struct radeon_device *rdev,
2212 				     SMU7_Discrete_DpmTable *table)
2213 {
2214 	u32 count;
2215 	struct atom_clock_dividers dividers;
2216 	int ret = -EINVAL;
2217 
2218 	table->VceLevelCount =
2219 		rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2220 
2221 	for (count = 0; count < table->VceLevelCount; count++) {
2222 		table->VceLevel[count].Frequency =
2223 			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2224 		table->VceLevel[count].MinVoltage =
2225 			(u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2226 		table->VceLevel[count].MinPhases = 1;
2227 
2228 		ret = radeon_atom_get_clock_dividers(rdev,
2229 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2230 						     table->VceLevel[count].Frequency, false, &dividers);
2231 		if (ret)
2232 			return ret;
2233 
2234 		table->VceLevel[count].Divider = (u8)dividers.post_divider;
2235 
2236 		table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2237 		table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2238 	}
2239 
2240 	return ret;
2241 }
2243 
2244 static int ci_populate_smc_acp_level(struct radeon_device *rdev,
2245 				     SMU7_Discrete_DpmTable *table)
2246 {
2247 	u32 count;
2248 	struct atom_clock_dividers dividers;
2249 	int ret = -EINVAL;
2250 
2251 	table->AcpLevelCount = (u8)
2252 		(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2253 
2254 	for (count = 0; count < table->AcpLevelCount; count++) {
2255 		table->AcpLevel[count].Frequency =
2256 			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2257 		table->AcpLevel[count].MinVoltage =
2258 			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2259 		table->AcpLevel[count].MinPhases = 1;
2260 
2261 		ret = radeon_atom_get_clock_dividers(rdev,
2262 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2263 						     table->AcpLevel[count].Frequency, false, &dividers);
2264 		if (ret)
2265 			return ret;
2266 
2267 		table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2268 
2269 		table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2270 		table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2271 	}
2272 
2273 	return ret;
2274 }
2275 
2276 static int ci_populate_smc_samu_level(struct radeon_device *rdev,
2277 				      SMU7_Discrete_DpmTable *table)
2278 {
2279 	u32 count;
2280 	struct atom_clock_dividers dividers;
2281 	int ret = -EINVAL;
2282 
2283 	table->SamuLevelCount =
2284 		rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2285 
2286 	for (count = 0; count < table->SamuLevelCount; count++) {
2287 		table->SamuLevel[count].Frequency =
2288 			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2289 		table->SamuLevel[count].MinVoltage =
2290 			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2291 		table->SamuLevel[count].MinPhases = 1;
2292 
2293 		ret = radeon_atom_get_clock_dividers(rdev,
2294 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2295 						     table->SamuLevel[count].Frequency, false, &dividers);
2296 		if (ret)
2297 			return ret;
2298 
2299 		table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2300 
2301 		table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2302 		table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2303 	}
2304 
2305 	return ret;
2306 }
2307 
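/*
 * Compute the MPLL register set for a memory clock. The nominal
 * frequency used for the spread spectrum lookup is scaled by the data
 * rate (4x for GDDR5, 2x otherwise). The CLKS/CLKV encoding below
 * mirrors the atombios SS info; the exact unit conventions for rate
 * and percentage are assumed from the atombios tables.
 */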
2308 static int ci_calculate_mclk_params(struct radeon_device *rdev,
2309 				    u32 memory_clock,
2310 				    SMU7_Discrete_MemoryLevel *mclk,
2311 				    bool strobe_mode,
2312 				    bool dll_state_on)
2313 {
2314 	struct ci_power_info *pi = ci_get_pi(rdev);
2315 	u32  dll_cntl = pi->clock_registers.dll_cntl;
2316 	u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2317 	u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2318 	u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2319 	u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2320 	u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2321 	u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2322 	u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
2323 	u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
2324 	struct atom_mpll_param mpll_param;
2325 	int ret;
2326 
2327 	ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
2328 	if (ret)
2329 		return ret;
2330 
2331 	mpll_func_cntl &= ~BWCTRL_MASK;
2332 	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
2333 
2334 	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
2335 	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
2336 		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
2337 
2338 	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
2339 	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
2340 
2341 	if (pi->mem_gddr5) {
2342 		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
2343 		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
2344 			YCLK_POST_DIV(mpll_param.post_div);
2345 	}
2346 
2347 	if (pi->caps_mclk_ss_support) {
2348 		struct radeon_atom_ss ss;
2349 		u32 freq_nom;
2350 		u32 tmp;
2351 		u32 reference_clock = rdev->clock.mpll.reference_freq;
2352 
2353 		if (pi->mem_gddr5)
2354 			freq_nom = memory_clock * 4;
2355 		else
2356 			freq_nom = memory_clock * 2;
2357 
2358 		tmp = (freq_nom / reference_clock);
2359 		tmp = tmp * tmp;
2360 		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2361 						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2362 			u32 clks = reference_clock * 5 / ss.rate;
2363 			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2364 
2365 			mpll_ss1 &= ~CLKV_MASK;
2366 			mpll_ss1 |= CLKV(clkv);
2367 
2368 			mpll_ss2 &= ~CLKS_MASK;
2369 			mpll_ss2 |= CLKS(clks);
2370 		}
2371 	}
2372 
2373 	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
2374 	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
2375 
2376 	if (dll_state_on)
2377 		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
2378 	else
2379 		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2380 
2381 	mclk->MclkFrequency = memory_clock;
2382 	mclk->MpllFuncCntl = mpll_func_cntl;
2383 	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2384 	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2385 	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2386 	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2387 	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2388 	mclk->DllCntl = dll_cntl;
2389 	mclk->MpllSs1 = mpll_ss1;
2390 	mclk->MpllSs2 = mpll_ss2;
2391 
2392 	return 0;
2393 }
2394 
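/*
 * Build one SMC memory level: derive the minimum VDDC/VDDCI/MVDD for
 * the target mclk, then set the mode flags. Self-refresh stutter is
 * only enabled below the stutter threshold with UVD idle and at most
 * two active displays; strobe mode and EDC read/write enables are
 * likewise threshold-based. Whether the DLL may stay powered in strobe
 * mode is decided from bits read back in MC_SEQ_MISC5/6/7.
 */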
2395 static int ci_populate_single_memory_level(struct radeon_device *rdev,
2396 					   u32 memory_clock,
2397 					   SMU7_Discrete_MemoryLevel *memory_level)
2398 {
2399 	struct ci_power_info *pi = ci_get_pi(rdev);
2400 	int ret;
2401 	bool dll_state_on;
2402 
2403 	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
2404 		ret = ci_get_dependency_volt_by_clk(rdev,
2405 						    &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2406 						    memory_clock, &memory_level->MinVddc);
2407 		if (ret)
2408 			return ret;
2409 	}
2410 
2411 	if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
2412 		ret = ci_get_dependency_volt_by_clk(rdev,
2413 						    &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2414 						    memory_clock, &memory_level->MinVddci);
2415 		if (ret)
2416 			return ret;
2417 	}
2418 
2419 	if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
2420 		ret = ci_get_dependency_volt_by_clk(rdev,
2421 						    &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2422 						    memory_clock, &memory_level->MinMvdd);
2423 		if (ret)
2424 			return ret;
2425 	}
2426 
2427 	memory_level->MinVddcPhases = 1;
2428 
2429 	if (pi->vddc_phase_shed_control)
2430 		ci_populate_phase_value_based_on_mclk(rdev,
2431 						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2432 						      memory_clock,
2433 						      &memory_level->MinVddcPhases);
2434 
2435 	memory_level->EnabledForThrottle = 1;
2436 	memory_level->EnabledForActivity = 1;
2437 	memory_level->UpH = 0;
2438 	memory_level->DownH = 100;
2439 	memory_level->VoltageDownH = 0;
2440 	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
2441 
2442 	memory_level->StutterEnable = false;
2443 	memory_level->StrobeEnable = false;
2444 	memory_level->EdcReadEnable = false;
2445 	memory_level->EdcWriteEnable = false;
2446 	memory_level->RttEnable = false;
2447 
2448 	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2449 
2450 	if (pi->mclk_stutter_mode_threshold &&
2451 	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
2452 	    !pi->uvd_enabled &&
2453 	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
2454 	    (rdev->pm.dpm.new_active_crtc_count <= 2))
2455 		memory_level->StutterEnable = true;
2456 
2457 	if (pi->mclk_strobe_mode_threshold &&
2458 	    (memory_clock <= pi->mclk_strobe_mode_threshold))
2459 		memory_level->StrobeEnable = true;
2460 
2461 	if (pi->mem_gddr5) {
2462 		memory_level->StrobeRatio =
2463 			si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
2464 		if (pi->mclk_edc_enable_threshold &&
2465 		    (memory_clock > pi->mclk_edc_enable_threshold))
2466 			memory_level->EdcReadEnable = true;
2467 
2468 		if (pi->mclk_edc_wr_enable_threshold &&
2469 		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
2470 			memory_level->EdcWriteEnable = true;
2471 
2472 		if (memory_level->StrobeEnable) {
2473 			if (si_get_mclk_frequency_ratio(memory_clock, true) >=
2474 			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
2475 				dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
2476 			else
2477 				dll_state_on = (RREG32(MC_SEQ_MISC6) >> 1) & 0x1;
2478 		} else {
2479 			dll_state_on = pi->dll_default_on;
2480 		}
2481 	} else {
2482 		memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
2483 		dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
2484 	}
2485 
2486 	ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
2487 	if (ret)
2488 		return ret;
2489 
2490 	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
2491 	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
2492 	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
2493 	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
2494 
2495 	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
2496 	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
2497 	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
2498 	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
2499 	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
2500 	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
2501 	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
2502 	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
2503 	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
2504 	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
2505 	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
2506 
2507 	return 0;
2508 }
2509 
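/*
 * Program the ACPI (lowest power) level: the engine clock runs at the
 * SPLL reference (SclkFrequency is set to the reference clock and the
 * sclk mux switched via SCLK_MUX_SEL(4)) with the SPLL powered down and
 * held in reset, and the memory DLLs powered down and reset. Voltages
 * fall back to the ACPI values from the vbios, or to the pp table
 * minimums.
 */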
2510 static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
2511 				      SMU7_Discrete_DpmTable *table)
2512 {
2513 	struct ci_power_info *pi = ci_get_pi(rdev);
2514 	struct atom_clock_dividers dividers;
2515 	SMU7_Discrete_VoltageLevel voltage_level;
2516 	u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
2517 	u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
2518 	u32 dll_cntl = pi->clock_registers.dll_cntl;
2519 	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2520 	int ret;
2521 
2522 	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
2523 
2524 	if (pi->acpi_vddc)
2525 		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
2526 	else
2527 		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
2528 
2529 	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
2530 
2531 	table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
2532 
2533 	ret = radeon_atom_get_clock_dividers(rdev,
2534 					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2535 					     table->ACPILevel.SclkFrequency, false, &dividers);
2536 	if (ret)
2537 		return ret;
2538 
2539 	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
2540 	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2541 	table->ACPILevel.DeepSleepDivId = 0;
2542 
2543 	spll_func_cntl &= ~SPLL_PWRON;
2544 	spll_func_cntl |= SPLL_RESET;
2545 
2546 	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
2547 	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
2548 
2549 	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
2550 	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
2551 	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
2552 	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
2553 	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
2554 	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2555 	table->ACPILevel.CcPwrDynRm = 0;
2556 	table->ACPILevel.CcPwrDynRm1 = 0;
2557 
2558 	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
2559 	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
2560 	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
2561 	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
2562 	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
2563 	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
2564 	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
2565 	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
2566 	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
2567 	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
2568 	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
2569 
2570 	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
2571 	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
2572 
2573 	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2574 		if (pi->acpi_vddci)
2575 			table->MemoryACPILevel.MinVddci =
2576 				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
2577 		else
2578 			table->MemoryACPILevel.MinVddci =
2579 				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
2580 	}
2581 
2582 	if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
2583 		table->MemoryACPILevel.MinMvdd = 0;
2584 	else
2585 		table->MemoryACPILevel.MinMvdd =
2586 			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
2587 
2588 	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
2589 	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2590 
2591 	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
2592 
2593 	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
2594 	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
2595 	table->MemoryACPILevel.MpllAdFuncCntl =
2596 		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
2597 	table->MemoryACPILevel.MpllDqFuncCntl =
2598 		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
2599 	table->MemoryACPILevel.MpllFuncCntl =
2600 		cpu_to_be32(pi->clock_registers.mpll_func_cntl);
2601 	table->MemoryACPILevel.MpllFuncCntl_1 =
2602 		cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
2603 	table->MemoryACPILevel.MpllFuncCntl_2 =
2604 		cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
2605 	table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
2606 	table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
2607 
2608 	table->MemoryACPILevel.EnabledForThrottle = 0;
2609 	table->MemoryACPILevel.EnabledForActivity = 0;
2610 	table->MemoryACPILevel.UpH = 0;
2611 	table->MemoryACPILevel.DownH = 100;
2612 	table->MemoryACPILevel.VoltageDownH = 0;
2613 	table->MemoryACPILevel.ActivityLevel =
2614 		cpu_to_be16((u16)pi->mclk_activity_target);
2615 
2616 	table->MemoryACPILevel.StutterEnable = false;
2617 	table->MemoryACPILevel.StrobeEnable = false;
2618 	table->MemoryACPILevel.EdcReadEnable = false;
2619 	table->MemoryACPILevel.EdcWriteEnable = false;
2620 	table->MemoryACPILevel.RttEnable = false;
2621 
2622 	return 0;
2623 }
2624 
2626 static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
2627 {
2628 	struct ci_power_info *pi = ci_get_pi(rdev);
2629 	struct ci_ulv_parm *ulv = &pi->ulv;
2630 
2631 	if (ulv->supported) {
2632 		if (enable)
2633 			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
2634 				0 : -EINVAL;
2635 		else
2636 			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
2637 				0 : -EINVAL;
2638 	}
2639 
2640 	return 0;
2641 }
2642 
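/*
 * Program the ULV (ultra low voltage) state. The ULV target voltage
 * arrives in rdev->pm.dpm.backbias_response_time (apparently reused by
 * the vbios parsing code for this purpose) and is encoded as an offset
 * below the lowest VDDC level: directly for GPIO control, or scaled by
 * 100/625 (one VID step per 6.25 mV, assuming millivolt units) for
 * SVI2.
 */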
2643 static int ci_populate_ulv_level(struct radeon_device *rdev,
2644 				 SMU7_Discrete_Ulv *state)
2645 {
2646 	struct ci_power_info *pi = ci_get_pi(rdev);
2647 	u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
2648 
2649 	state->CcPwrDynRm = 0;
2650 	state->CcPwrDynRm1 = 0;
2651 
2652 	if (ulv_voltage == 0) {
2653 		pi->ulv.supported = false;
2654 		return 0;
2655 	}
2656 
2657 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2658 		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2659 			state->VddcOffset = 0;
2660 		else
2661 			state->VddcOffset =
2662 				rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
2663 	} else {
2664 		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2665 			state->VddcOffsetVid = 0;
2666 		else
2667 			state->VddcOffsetVid = (u8)
2668 				((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
2669 				 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2670 	}
2671 	state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
2672 
2673 	state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
2674 	state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
2675 	state->VddcOffset = cpu_to_be16(state->VddcOffset);
2676 
2677 	return 0;
2678 }
2679 
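/*
 * Compute the SPLL register set for an engine clock. fbdiv is the
 * 26-bit fractional feedback divider reported by the atom divider
 * tables; when engine spread spectrum data exists for the VCO
 * frequency, the CLK_S/CLK_V pair is derived from the SS rate and
 * percentage and SSEN is set.
 */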
2680 static int ci_calculate_sclk_params(struct radeon_device *rdev,
2681 				    u32 engine_clock,
2682 				    SMU7_Discrete_GraphicsLevel *sclk)
2683 {
2684 	struct ci_power_info *pi = ci_get_pi(rdev);
2685 	struct atom_clock_dividers dividers;
2686 	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
2687 	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
2688 	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
2689 	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2690 	u32 reference_clock = rdev->clock.spll.reference_freq;
2691 	u32 reference_divider;
2692 	u32 fbdiv;
2693 	int ret;
2694 
2695 	ret = radeon_atom_get_clock_dividers(rdev,
2696 					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2697 					     engine_clock, false, &dividers);
2698 	if (ret)
2699 		return ret;
2700 
2701 	reference_divider = 1 + dividers.ref_div;
2702 	fbdiv = dividers.fb_div & 0x3FFFFFF;
2703 
2704 	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
2705 	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
2706 	spll_func_cntl_3 |= SPLL_DITHEN;
2707 
2708 	if (pi->caps_sclk_ss_support) {
2709 		struct radeon_atom_ss ss;
2710 		u32 vco_freq = engine_clock * dividers.post_div;
2711 
2712 		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2713 						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
2714 			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
2715 			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
2716 
2717 			cg_spll_spread_spectrum &= ~CLK_S_MASK;
2718 			cg_spll_spread_spectrum |= CLK_S(clk_s);
2719 			cg_spll_spread_spectrum |= SSEN;
2720 
2721 			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
2722 			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
2723 		}
2724 	}
2725 
2726 	sclk->SclkFrequency = engine_clock;
2727 	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
2728 	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
2729 	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
2730 	sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
2731 	sclk->SclkDid = (u8)dividers.post_divider;
2732 
2733 	return 0;
2734 }
2735 
2736 static int ci_populate_single_graphic_level(struct radeon_device *rdev,
2737 					    u32 engine_clock,
2738 					    u16 sclk_activity_level_t,
2739 					    SMU7_Discrete_GraphicsLevel *graphic_level)
2740 {
2741 	struct ci_power_info *pi = ci_get_pi(rdev);
2742 	int ret;
2743 
2744 	ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
2745 	if (ret)
2746 		return ret;
2747 
2748 	ret = ci_get_dependency_volt_by_clk(rdev,
2749 					    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2750 					    engine_clock, &graphic_level->MinVddc);
2751 	if (ret)
2752 		return ret;
2753 
2754 	graphic_level->SclkFrequency = engine_clock;
2755 
2756 	graphic_level->Flags = 0;
2757 	graphic_level->MinVddcPhases = 1;
2758 
2759 	if (pi->vddc_phase_shed_control)
2760 		ci_populate_phase_value_based_on_sclk(rdev,
2761 						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2762 						      engine_clock,
2763 						      &graphic_level->MinVddcPhases);
2764 
2765 	graphic_level->ActivityLevel = sclk_activity_level_t;
2766 
2767 	graphic_level->CcPwrDynRm = 0;
2768 	graphic_level->CcPwrDynRm1 = 0;
2769 	graphic_level->EnabledForActivity = 1;
2770 	graphic_level->EnabledForThrottle = 1;
2771 	graphic_level->UpH = 0;
2772 	graphic_level->DownH = 0;
2773 	graphic_level->VoltageDownH = 0;
2774 	graphic_level->PowerThrottle = 0;
2775 
2776 	if (pi->caps_sclk_ds)
2777 		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
2778 										   engine_clock,
2779 										   CISLAND_MINIMUM_ENGINE_CLOCK);
2780 
2781 	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2782 
2783 	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
2784 	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
2785 	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
2786 	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
2787 	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
2788 	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
2789 	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
2790 	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
2791 	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
2792 	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
2793 	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
2794 
2795 	return 0;
2796 }
2797 
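/*
 * Populate every graphics DPM level and upload the full level array to
 * SMC SRAM at the GraphicsLevel offset of the discrete dpm table. Only
 * the top sclk level gets the high display watermark.
 */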
2798 static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
2799 {
2800 	struct ci_power_info *pi = ci_get_pi(rdev);
2801 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
2802 	u32 level_array_address = pi->dpm_table_start +
2803 		offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2804 	u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
2805 		SMU7_MAX_LEVELS_GRAPHICS;
2806 	SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
2807 	u32 i;
2808 	int ret;
2808 
2809 	memset(levels, 0, level_array_size);
2810 
2811 	for (i = 0; i < dpm_table->sclk_table.count; i++) {
2812 		ret = ci_populate_single_graphic_level(rdev,
2813 						       dpm_table->sclk_table.dpm_levels[i].value,
2814 						       (u16)pi->activity_target[i],
2815 						       &pi->smc_state_table.GraphicsLevel[i]);
2816 		if (ret)
2817 			return ret;
2818 		if (i == (dpm_table->sclk_table.count - 1))
2819 			pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
2820 				PPSMC_DISPLAY_WATERMARK_HIGH;
2821 	}
2822 
2823 	pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
2824 	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
2825 		ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
2826 
2827 	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2828 				   (u8 *)levels, level_array_size,
2829 				   pi->sram_end);
2830 	if (ret)
2831 		return ret;
2832 
2833 	return 0;
2834 }
2835 
2836 static int ci_populate_ulv_state(struct radeon_device *rdev,
2837 				 SMU7_Discrete_Ulv *ulv_level)
2838 {
2839 	return ci_populate_ulv_level(rdev, ulv_level);
2840 }
2841 
2842 static int ci_populate_all_memory_levels(struct radeon_device *rdev)
2843 {
2844 	struct ci_power_info *pi = ci_get_pi(rdev);
2845 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
2846 	u32 level_array_address = pi->dpm_table_start +
2847 		offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
2848 	u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
2849 		SMU7_MAX_LEVELS_MEMORY;
2850 	SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
2851 	u32 i, ret;
2852 	u32 i;
2853 	int ret;
2853 	memset(levels, 0, level_array_size);
2854 
2855 	for (i = 0; i < dpm_table->mclk_table.count; i++) {
2856 		if (dpm_table->mclk_table.dpm_levels[i].value == 0)
2857 			return -EINVAL;
2858 		ret = ci_populate_single_memory_level(rdev,
2859 						      dpm_table->mclk_table.dpm_levels[i].value,
2860 						      &pi->smc_state_table.MemoryLevel[i]);
2861 		if (ret)
2862 			return ret;
2863 	}
2864 
2865 	pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
2866 
2867 	pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
2868 	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
2869 		ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
2870 
2871 	pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
2872 		PPSMC_DISPLAY_WATERMARK_HIGH;
2873 
2874 	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2875 				   (u8 *)levels, level_array_size,
2876 				   pi->sram_end);
2877 	if (ret)
2878 		return ret;
2879 
2880 	return 0;
2881 }
2882 
2883 static void ci_reset_single_dpm_table(struct radeon_device *rdev,
2884 				      struct ci_single_dpm_table *dpm_table,
2885 				      u32 count)
2886 {
2887 	u32 i;
2888 
2889 	dpm_table->count = count;
2890 	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
2891 		dpm_table->dpm_levels[i].enabled = false;
2892 }
2893 
2894 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
2895 				      u32 index, u32 pcie_gen, u32 pcie_lanes)
2896 {
2897 	dpm_table->dpm_levels[index].value = pcie_gen;
2898 	dpm_table->dpm_levels[index].param1 = pcie_lanes;
2899 	dpm_table->dpm_levels[index].enabled = true;
2900 }
2901 
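/*
 * The PCIE table always carries six fixed entries alternating the
 * power-saving and performance profiles: min gen + min lanes (levels 0
 * and 1), min gen + max lanes (2 and 3), and max gen + max lanes (4 and
 * 5). If only one of the two profiles is valid, it is mirrored into the
 * other.
 */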
2902 static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
2903 {
2904 	struct ci_power_info *pi = ci_get_pi(rdev);
2905 
2906 	if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
2907 		return -EINVAL;
2908 
2909 	if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
2910 		pi->pcie_gen_powersaving = pi->pcie_gen_performance;
2911 		pi->pcie_lane_powersaving = pi->pcie_lane_performance;
2912 	} else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
2913 		pi->pcie_gen_performance = pi->pcie_gen_powersaving;
2914 		pi->pcie_lane_performance = pi->pcie_lane_powersaving;
2915 	}
2916 
2917 	ci_reset_single_dpm_table(rdev,
2918 				  &pi->dpm_table.pcie_speed_table,
2919 				  SMU7_MAX_LEVELS_LINK);
2920 
2921 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
2922 				  pi->pcie_gen_powersaving.min,
2923 				  pi->pcie_lane_powersaving.min);
2924 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
2925 				  pi->pcie_gen_performance.min,
2926 				  pi->pcie_lane_performance.min);
2927 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
2928 				  pi->pcie_gen_powersaving.min,
2929 				  pi->pcie_lane_powersaving.max);
2930 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
2931 				  pi->pcie_gen_performance.min,
2932 				  pi->pcie_lane_performance.max);
2933 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
2934 				  pi->pcie_gen_powersaving.max,
2935 				  pi->pcie_lane_powersaving.max);
2936 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
2937 				  pi->pcie_gen_performance.max,
2938 				  pi->pcie_lane_performance.max);
2939 
2940 	pi->dpm_table.pcie_speed_table.count = 6;
2941 
2942 	return 0;
2943 }
2944 
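/*
 * Build the default DPM tables from the vbios dependency tables. The
 * sclk and mclk tables collapse consecutive duplicate clocks so each
 * level is unique; the voltage tables are copied as-is.
 */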
2945 static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
2946 {
2947 	struct ci_power_info *pi = ci_get_pi(rdev);
2948 	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
2949 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2950 	struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
2951 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
2952 	struct radeon_cac_leakage_table *std_voltage_table =
2953 		&rdev->pm.dpm.dyn_state.cac_leakage_table;
2954 	u32 i;
2955 
2956 	if (allowed_sclk_vddc_table == NULL)
2957 		return -EINVAL;
2958 	if (allowed_sclk_vddc_table->count < 1)
2959 		return -EINVAL;
2960 	if (allowed_mclk_table == NULL)
2961 		return -EINVAL;
2962 	if (allowed_mclk_table->count < 1)
2963 		return -EINVAL;
2964 
2965 	memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
2966 
2967 	ci_reset_single_dpm_table(rdev,
2968 				  &pi->dpm_table.sclk_table,
2969 				  SMU7_MAX_LEVELS_GRAPHICS);
2970 	ci_reset_single_dpm_table(rdev,
2971 				  &pi->dpm_table.mclk_table,
2972 				  SMU7_MAX_LEVELS_MEMORY);
2973 	ci_reset_single_dpm_table(rdev,
2974 				  &pi->dpm_table.vddc_table,
2975 				  SMU7_MAX_LEVELS_VDDC);
2976 	ci_reset_single_dpm_table(rdev,
2977 				  &pi->dpm_table.vddci_table,
2978 				  SMU7_MAX_LEVELS_VDDCI);
2979 	ci_reset_single_dpm_table(rdev,
2980 				  &pi->dpm_table.mvdd_table,
2981 				  SMU7_MAX_LEVELS_MVDD);
2982 
2983 	pi->dpm_table.sclk_table.count = 0;
2984 	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
2985 		if ((i == 0) ||
2986 		    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
2987 		     allowed_sclk_vddc_table->entries[i].clk)) {
2988 			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
2989 				allowed_sclk_vddc_table->entries[i].clk;
2990 			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true;
2991 			pi->dpm_table.sclk_table.count++;
2992 		}
2993 	}
2994 
2995 	pi->dpm_table.mclk_table.count = 0;
2996 	for (i = 0; i < allowed_mclk_table->count; i++) {
2997 		if ((i == 0) ||
2998 		    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
2999 		     allowed_mclk_table->entries[i].clk)) {
3000 			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3001 				allowed_mclk_table->entries[i].clk;
3002 			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true;
3003 			pi->dpm_table.mclk_table.count++;
3004 		}
3005 	}
3006 
3007 	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3008 		pi->dpm_table.vddc_table.dpm_levels[i].value =
3009 			allowed_sclk_vddc_table->entries[i].v;
3010 		pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3011 			std_voltage_table->entries[i].leakage;
3012 		pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3013 	}
3014 	pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3015 
3016 	allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3017 	if (allowed_mclk_table->count) {
3018 		for (i = 0; i < allowed_mclk_table->count; i++) {
3019 			pi->dpm_table.vddci_table.dpm_levels[i].value =
3020 				allowed_mclk_table->entries[i].v;
3021 			pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3022 		}
3023 		pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3024 	}
3025 
3026 	allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3027 	if (allowed_mclk_table->count) {
3028 		for (i = 0; i < allowed_mclk_table->count; i++) {
3029 			pi->dpm_table.mvdd_table.dpm_levels[i].value =
3030 				allowed_mclk_table->entries[i].v;
3031 			pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3032 		}
3033 		pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3034 	}
3035 
3036 	return ci_setup_default_pcie_tables(rdev);
3039 }
3040 
3041 static int ci_find_boot_level(struct ci_single_dpm_table *table,
3042 			      u32 value, u32 *boot_level)
3043 {
3044 	u32 i;
3045 	int ret = -EINVAL;
3046 
3047 	for (i = 0; i < table->count; i++) {
3048 		if (value == table->dpm_levels[i].value) {
3049 			*boot_level = i;
3050 			ret = 0;
3051 		}
3052 	}
3053 
3054 	return ret;
3055 }
3056 
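/*
 * Build and upload the master SMU7 discrete dpm table. The final copy
 * starts at the SystemFlags member and deliberately stops
 * 3 * sizeof(SMU7_PIDController) short of the end of the structure,
 * leaving the trailing PID controller blocks (assumed to be the last
 * members) untouched.
 */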
3057 static int ci_init_smc_table(struct radeon_device *rdev)
3058 {
3059 	struct ci_power_info *pi = ci_get_pi(rdev);
3060 	struct ci_ulv_parm *ulv = &pi->ulv;
3061 	struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
3062 	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3063 	int ret;
3064 
3065 	ret = ci_setup_default_dpm_tables(rdev);
3066 	if (ret)
3067 		return ret;
3068 
3069 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3070 		ci_populate_smc_voltage_tables(rdev, table);
3071 
3072 	ci_init_fps_limits(rdev);
3073 
3074 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3075 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3076 
3077 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3078 		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3079 
3080 	if (pi->mem_gddr5)
3081 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3082 
3083 	if (ulv->supported) {
3084 		ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
3085 		if (ret)
3086 			return ret;
3087 		WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3088 	}
3089 
3090 	ret = ci_populate_all_graphic_levels(rdev);
3091 	if (ret)
3092 		return ret;
3093 
3094 	ret = ci_populate_all_memory_levels(rdev);
3095 	if (ret)
3096 		return ret;
3097 
3098 	ci_populate_smc_link_level(rdev, table);
3099 
3100 	ret = ci_populate_smc_acpi_level(rdev, table);
3101 	if (ret)
3102 		return ret;
3103 
3104 	ret = ci_populate_smc_vce_level(rdev, table);
3105 	if (ret)
3106 		return ret;
3107 
3108 	ret = ci_populate_smc_acp_level(rdev, table);
3109 	if (ret)
3110 		return ret;
3111 
3112 	ret = ci_populate_smc_samu_level(rdev, table);
3113 	if (ret)
3114 		return ret;
3115 
3116 	ret = ci_do_program_memory_timing_parameters(rdev);
3117 	if (ret)
3118 		return ret;
3119 
3120 	ret = ci_populate_smc_uvd_level(rdev, table);
3121 	if (ret)
3122 		return ret;
3123 
3124 	table->UvdBootLevel = 0;
3125 	table->VceBootLevel = 0;
3126 	table->AcpBootLevel = 0;
3127 	table->SamuBootLevel = 0;
3128 	table->GraphicsBootLevel = 0;
3129 	table->MemoryBootLevel = 0;
3130 
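	/*
	 * If the vbios boot clocks are absent from the dpm tables, these
	 * lookups fail and the boot levels simply stay at 0 (set above);
	 * the return values are ignored, so a failed lookup is not fatal.
	 */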
3131 	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3132 				 pi->vbios_boot_state.sclk_bootup_value,
3133 				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3134 
3135 	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3136 				 pi->vbios_boot_state.mclk_bootup_value,
3137 				 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3138 
3139 	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3140 	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3141 	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3142 
3143 	ci_populate_smc_initial_state(rdev, radeon_boot_state);
3144 
3145 	ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
3146 	if (ret)
3147 		return ret;
3148 
3149 	table->UVDInterval = 1;
3150 	table->VCEInterval = 1;
3151 	table->ACPInterval = 1;
3152 	table->SAMUInterval = 1;
3153 	table->GraphicsVoltageChangeEnable = 1;
3154 	table->GraphicsThermThrottleEnable = 1;
3155 	table->GraphicsInterval = 1;
3156 	table->VoltageInterval = 1;
3157 	table->ThermalInterval = 1;
3158 	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3159 					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3160 	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3161 					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3162 	table->MemoryVoltageChangeEnable = 1;
3163 	table->MemoryInterval = 1;
3164 	table->VoltageResponseTime = 0;
3165 	table->VddcVddciDelta = 4000;
3166 	table->PhaseResponseTime = 0;
3167 	table->MemoryThermThrottleEnable = 1;
3168 	table->PCIeBootLinkLevel = 0;
3169 	table->PCIeGenInterval = 1;
3170 	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3171 		table->SVI2Enable = 1;
3172 	else
3173 		table->SVI2Enable = 0;
3174 
3175 	table->ThermGpio = 17;
3176 	table->SclkStepSize = 0x4000;
3177 
3178 	table->SystemFlags = cpu_to_be32(table->SystemFlags);
3179 	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3180 	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3181 	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3182 	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3183 	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3184 	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3185 	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3186 	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3187 	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3188 	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3189 	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3190 	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3191 	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3192 
3193 	ret = ci_copy_bytes_to_smc(rdev,
3194 				   pi->dpm_table_start +
3195 				   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3196 				   (u8 *)&table->SystemFlags,
3197 				   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3198 				   pi->sram_end);
3199 	if (ret)
3200 		return ret;
3201 
3202 	return 0;
3203 }
3204 
3205 static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3206 				      struct ci_single_dpm_table *dpm_table,
3207 				      u32 low_limit, u32 high_limit)
3208 {
3209 	u32 i;
3210 
3211 	for (i = 0; i < dpm_table->count; i++) {
3212 		if ((dpm_table->dpm_levels[i].value < low_limit) ||
3213 		    (dpm_table->dpm_levels[i].value > high_limit))
3214 			dpm_table->dpm_levels[i].enabled = false;
3215 		else
3216 			dpm_table->dpm_levels[i].enabled = true;
3217 	}
3218 }
3219 
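/*
 * Disable PCIE levels outside the requested speed/lane window, then
 * make a second pass dropping duplicate (speed, lanes) pairs so each
 * remaining configuration is enabled only once.
 */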
3220 static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3221 				    u32 speed_low, u32 lanes_low,
3222 				    u32 speed_high, u32 lanes_high)
3223 {
3224 	struct ci_power_info *pi = ci_get_pi(rdev);
3225 	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3226 	u32 i, j;
3227 
3228 	for (i = 0; i < pcie_table->count; i++) {
3229 		if ((pcie_table->dpm_levels[i].value < speed_low) ||
3230 		    (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3231 		    (pcie_table->dpm_levels[i].value > speed_high) ||
3232 		    (pcie_table->dpm_levels[i].param1 > lanes_high))
3233 			pcie_table->dpm_levels[i].enabled = false;
3234 		else
3235 			pcie_table->dpm_levels[i].enabled = true;
3236 	}
3237 
3238 	for (i = 0; i < pcie_table->count; i++) {
3239 		if (pcie_table->dpm_levels[i].enabled) {
3240 			for (j = i + 1; j < pcie_table->count; j++) {
3241 				if (pcie_table->dpm_levels[j].enabled) {
3242 					if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3243 					    (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3244 						pcie_table->dpm_levels[j].enabled = false;
3245 				}
3246 			}
3247 		}
3248 	}
3249 }
3250 
3251 static int ci_trim_dpm_states(struct radeon_device *rdev,
3252 			      struct radeon_ps *radeon_state)
3253 {
3254 	struct ci_ps *state = ci_get_ps(radeon_state);
3255 	struct ci_power_info *pi = ci_get_pi(rdev);
3256 	u32 high_limit_count;
3257 
3258 	if (state->performance_level_count < 1)
3259 		return -EINVAL;
3260 
3261 	if (state->performance_level_count == 1)
3262 		high_limit_count = 0;
3263 	else
3264 		high_limit_count = 1;
3265 
3266 	ci_trim_single_dpm_states(rdev,
3267 				  &pi->dpm_table.sclk_table,
3268 				  state->performance_levels[0].sclk,
3269 				  state->performance_levels[high_limit_count].sclk);
3270 
3271 	ci_trim_single_dpm_states(rdev,
3272 				  &pi->dpm_table.mclk_table,
3273 				  state->performance_levels[0].mclk,
3274 				  state->performance_levels[high_limit_count].mclk);
3275 
3276 	ci_trim_pcie_dpm_states(rdev,
3277 				state->performance_levels[0].pcie_gen,
3278 				state->performance_levels[0].pcie_lane,
3279 				state->performance_levels[high_limit_count].pcie_gen,
3280 				state->performance_levels[high_limit_count].pcie_lane);
3281 
3282 	return 0;
3283 }
3284 
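/*
 * Map the current display clock to its minimum VDDC, round up to the
 * nearest entry in the sclk/VDDC dependency table, and ask the SMC to
 * honor it as a voltage floor via PPSMC_MSG_VddC_Request.
 */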
3285 static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
3286 {
3287 	struct radeon_clock_voltage_dependency_table *disp_voltage_table =
3288 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3289 	struct radeon_clock_voltage_dependency_table *vddc_table =
3290 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3291 	u32 requested_voltage = 0;
3292 	u32 i;
3293 
3294 	if (disp_voltage_table == NULL)
3295 		return -EINVAL;
3296 	if (!disp_voltage_table->count)
3297 		return -EINVAL;
3298 
3299 	for (i = 0; i < disp_voltage_table->count; i++) {
3300 		if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3301 			requested_voltage = disp_voltage_table->entries[i].v;
3302 	}
3303 
3304 	for (i = 0; i < vddc_table->count; i++) {
3305 		if (requested_voltage <= vddc_table->entries[i].v) {
3306 			requested_voltage = vddc_table->entries[i].v;
3307 			return (ci_send_msg_to_smc_with_parameter(rdev,
3308 								  PPSMC_MSG_VddC_Request,
3309 								  requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3310 				0 : -EINVAL;
3311 		}
3312 	}
3313 
3314 	return -EINVAL;
3315 }
3316 
3317 static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3318 {
3319 	struct ci_power_info *pi = ci_get_pi(rdev);
3320 	PPSMC_Result result;
3321 
3322 	if (!pi->sclk_dpm_key_disabled) {
3323 		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3324 			result = ci_send_msg_to_smc_with_parameter(rdev,
3325 								   PPSMC_MSG_SCLKDPM_SetEnabledMask,
3326 								   pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3327 			if (result != PPSMC_Result_OK)
3328 				return -EINVAL;
3329 		}
3330 	}
3331 
3332 	if (!pi->mclk_dpm_key_disabled) {
3333 		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3334 			result = ci_send_msg_to_smc_with_parameter(rdev,
3335 								   PPSMC_MSG_MCLKDPM_SetEnabledMask,
3336 								   pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3337 			if (result != PPSMC_Result_OK)
3338 				return -EINVAL;
3339 		}
3340 	}
3341 
3342 	if (!pi->pcie_dpm_key_disabled) {
3343 		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3344 			result = ci_send_msg_to_smc_with_parameter(rdev,
3345 								   PPSMC_MSG_PCIeDPM_SetEnabledMask,
3346 								   pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3347 			if (result != PPSMC_Result_OK)
3348 				return -EINVAL;
3349 		}
3350 	}
3351 
3352 	ci_apply_disp_minimum_voltage_request(rdev);
3353 
3354 	return 0;
3355 }
3356 
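/*
 * Work out how much of the SMU7 DPM table must be re-uploaded for the
 * new state: DPMTABLE_OD_UPDATE_* flags a clock that is not in the
 * table at all (overdrive), while DPMTABLE_UPDATE_* only forces a
 * repopulate of the existing levels (e.g. on a crtc count change).
 */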
3357 static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
3358 						   struct radeon_ps *radeon_state)
3359 {
3360 	struct ci_power_info *pi = ci_get_pi(rdev);
3361 	struct ci_ps *state = ci_get_ps(radeon_state);
3362 	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3363 	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3364 	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3365 	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3366 	u32 i;
3367 
3368 	pi->need_update_smu7_dpm_table = 0;
3369 
3370 	for (i = 0; i < sclk_table->count; i++) {
3371 		if (sclk == sclk_table->dpm_levels[i].value)
3372 			break;
3373 	}
3374 
3375 	if (i >= sclk_table->count) {
3376 		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3377 	} else {
3378 		/* XXX check display min clock requirements */
3379 		if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
3380 			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3381 	}
3382 
3383 	for (i = 0; i < mclk_table->count; i++) {
3384 		if (mclk == mclk_table->dpm_levels[i].value)
3385 			break;
3386 	}
3387 
3388 	if (i >= mclk_table->count)
3389 		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3390 
3391 	if (rdev->pm.dpm.current_active_crtc_count !=
3392 	    rdev->pm.dpm.new_active_crtc_count)
3393 		pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3394 }
3395 
3396 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
3397 						       struct radeon_ps *radeon_state)
3398 {
3399 	struct ci_power_info *pi = ci_get_pi(rdev);
3400 	struct ci_ps *state = ci_get_ps(radeon_state);
3401 	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3402 	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3403 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
3404 	int ret;
3405 
3406 	if (!pi->need_update_smu7_dpm_table)
3407 		return 0;
3408 
3409 	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
3410 		dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
3411 
3412 	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
3413 		dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
3414 
3415 	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
3416 		ret = ci_populate_all_graphic_levels(rdev);
3417 		if (ret)
3418 			return ret;
3419 	}
3420 
3421 	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3422 		ret = ci_populate_all_memory_levels(rdev);
3423 		if (ret)
3424 			return ret;
3425 	}
3426 
3427 	return 0;
3428 }
3429 
3430 static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
3431 {
3432 	struct ci_power_info *pi = ci_get_pi(rdev);
3433 	const struct radeon_clock_and_voltage_limits *max_limits;
3434 	int i;
3435 
3436 	if (rdev->pm.dpm.ac_power)
3437 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3438 	else
3439 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3440 
3441 	if (enable) {
3442 		pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
3443 
3444 		for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3445 			if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3446 				pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
3447 
3448 				if (!pi->caps_uvd_dpm)
3449 					break;
3450 			}
3451 		}
3452 
3453 		ci_send_msg_to_smc_with_parameter(rdev,
3454 						  PPSMC_MSG_UVDDPM_SetEnabledMask,
3455 						  pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
3456 
3457 		if (pi->last_mclk_dpm_enable_mask & 0x1) {
3458 			pi->uvd_enabled = true;
3459 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3460 			ci_send_msg_to_smc_with_parameter(rdev,
3461 							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
3462 							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3463 		}
3464 	} else {
3465 		if (pi->last_mclk_dpm_enable_mask & 0x1) {
3466 			pi->uvd_enabled = false;
3467 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
3468 			ci_send_msg_to_smc_with_parameter(rdev,
3469 							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
3470 							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3471 		}
3472 	}
3473 
3474 	return (ci_send_msg_to_smc(rdev, enable ?
3475 				   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
3476 		0 : -EINVAL;
3477 }
3478 
3479 static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
3480 {
3481 	struct ci_power_info *pi = ci_get_pi(rdev);
3482 	const struct radeon_clock_and_voltage_limits *max_limits;
3483 	int i;
3484 
3485 	if (rdev->pm.dpm.ac_power)
3486 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3487 	else
3488 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3489 
3490 	if (enable) {
3491 		pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
3492 		for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3493 			if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3494 				pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
3495 
3496 				if (!pi->caps_vce_dpm)
3497 					break;
3498 			}
3499 		}
3500 
3501 		ci_send_msg_to_smc_with_parameter(rdev,
3502 						  PPSMC_MSG_VCEDPM_SetEnabledMask,
3503 						  pi->dpm_level_enable_mask.vce_dpm_enable_mask);
3504 	}
3505 
3506 	return (ci_send_msg_to_smc(rdev, enable ?
3507 				   PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
3508 		0 : -EINVAL;
3509 }
3510 
3511 #if 0
3512 static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
3513 {
3514 	struct ci_power_info *pi = ci_get_pi(rdev);
3515 	const struct radeon_clock_and_voltage_limits *max_limits;
3516 	int i;
3517 
3518 	if (rdev->pm.dpm.ac_power)
3519 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3520 	else
3521 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3522 
3523 	if (enable) {
3524 		pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
3525 		for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3526 			if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3527 				pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
3528 
3529 				if (!pi->caps_samu_dpm)
3530 					break;
3531 			}
3532 		}
3533 
3534 		ci_send_msg_to_smc_with_parameter(rdev,
3535 						  PPSMC_MSG_SAMUDPM_SetEnabledMask,
3536 						  pi->dpm_level_enable_mask.samu_dpm_enable_mask);
3537 	}
3538 	return (ci_send_msg_to_smc(rdev, enable ?
3539 				   PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
3540 		0 : -EINVAL;
3541 }
3542 
3543 static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
3544 {
3545 	struct ci_power_info *pi = ci_get_pi(rdev);
3546 	const struct radeon_clock_and_voltage_limits *max_limits;
3547 	int i;
3548 
3549 	if (rdev->pm.dpm.ac_power)
3550 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3551 	else
3552 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3553 
3554 	if (enable) {
3555 		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
3556 		for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3557 			if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3558 				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
3559 
3560 				if (!pi->caps_acp_dpm)
3561 					break;
3562 			}
3563 		}
3564 
3565 		ci_send_msg_to_smc_with_parameter(rdev,
3566 						  PPSMC_MSG_ACPDPM_SetEnabledMask,
3567 						  pi->dpm_level_enable_mask.acp_dpm_enable_mask);
3568 	}
3569 
3570 	return (ci_send_msg_to_smc(rdev, enable ?
3571 				   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
3572 		0 : -EINVAL;
3573 }
3574 #endif
3575 
3576 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
3577 {
3578 	struct ci_power_info *pi = ci_get_pi(rdev);
3579 	u32 tmp;
3580 
3581 	if (!gate) {
3582 		if (pi->caps_uvd_dpm ||
3583 		    (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
3584 			pi->smc_state_table.UvdBootLevel = 0;
3585 		else
3586 			pi->smc_state_table.UvdBootLevel =
3587 				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
3588 
3589 		tmp = RREG32_SMC(DPM_TABLE_475);
3590 		tmp &= ~UvdBootLevel_MASK;
3591 		tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
3592 		WREG32_SMC(DPM_TABLE_475, tmp);
3593 	}
3594 
3595 	return ci_enable_uvd_dpm(rdev, !gate);
3596 }
3597 
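/*
 * Pick the lowest VCE level whose evclk meets the hard-coded minimum
 * below (the "???" suggests the value is empirical); if nothing
 * qualifies, fall back to the table's last entry.
 */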
3598 static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
3599 {
3600 	u8 i;
3601 	u32 min_evclk = 30000; /* ??? */
3602 	struct radeon_vce_clock_voltage_dependency_table *table =
3603 		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
3604 
3605 	for (i = 0; i < table->count; i++) {
3606 		if (table->entries[i].evclk >= min_evclk)
3607 			return i;
3608 	}
3609 
3610 	return table->count - 1;
3611 }
3612 
3613 static int ci_update_vce_dpm(struct radeon_device *rdev,
3614 			     struct radeon_ps *radeon_new_state,
3615 			     struct radeon_ps *radeon_current_state)
3616 {
3617 	struct ci_power_info *pi = ci_get_pi(rdev);
3618 	int ret = 0;
3619 	u32 tmp;
3620 
3621 	if (radeon_current_state->evclk != radeon_new_state->evclk) {
3622 		if (radeon_new_state->evclk) {
3623 			/* turn the clocks on when encoding */
3624 			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
3625 
3626 			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
3627 			tmp = RREG32_SMC(DPM_TABLE_475);
3628 			tmp &= ~VceBootLevel_MASK;
3629 			tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
3630 			WREG32_SMC(DPM_TABLE_475, tmp);
3631 
3632 			ret = ci_enable_vce_dpm(rdev, true);
3633 		} else {
3634 			/* turn the clocks off when not encoding */
3635 			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
3636 
3637 			ret = ci_enable_vce_dpm(rdev, false);
3638 		}
3639 	}
3640 	return ret;
3641 }
3642 
3643 #if 0
3644 static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
3645 {
3646 	return ci_enable_samu_dpm(rdev, gate);
3647 }
3648 
3649 static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
3650 {
3651 	struct ci_power_info *pi = ci_get_pi(rdev);
3652 	u32 tmp;
3653 
3654 	if (!gate) {
3655 		pi->smc_state_table.AcpBootLevel = 0;
3656 
3657 		tmp = RREG32_SMC(DPM_TABLE_475);
3658 		tmp &= ~AcpBootLevel_MASK;
3659 		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
3660 		WREG32_SMC(DPM_TABLE_475, tmp);
3661 	}
3662 
3663 	return ci_enable_acp_dpm(rdev, !gate);
3664 }
3665 #endif
3666 
3667 static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
3668 					     struct radeon_ps *radeon_state)
3669 {
3670 	struct ci_power_info *pi = ci_get_pi(rdev);
3671 	int ret;
3672 
3673 	ret = ci_trim_dpm_states(rdev, radeon_state);
3674 	if (ret)
3675 		return ret;
3676 
3677 	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3678 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
3679 	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3680 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
3681 	pi->last_mclk_dpm_enable_mask =
3682 		pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3683 	if (pi->uvd_enabled) {
3684 		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
3685 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3686 	}
3687 	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
3688 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
3689 
3690 	return 0;
3691 }
3692 
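/*
 * Index of the lowest set bit in level_mask, i.e. ffs(level_mask) - 1.
 * Callers must pass a non-zero mask or the loop never terminates.
 */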
3693 static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
3694 				       u32 level_mask)
3695 {
3696 	u32 level = 0;
3697 
3698 	while ((level_mask & (1 << level)) == 0)
3699 		level++;
3700 
3701 	return level;
3702 }
3703 
3704 
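/*
 * HIGH forces the highest enabled sclk/mclk/pcie level, LOW the lowest,
 * and AUTO lifts any forcing.  "while (tmp >>= 1) levels++" computes
 * the index of the highest set bit (a mask of 0x1f yields 4), and the
 * polling loops below wait for the SMC to report that level as current.
 */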
3705 int ci_dpm_force_performance_level(struct radeon_device *rdev,
3706 				   enum radeon_dpm_forced_level level)
3707 {
3708 	struct ci_power_info *pi = ci_get_pi(rdev);
3709 	PPSMC_Result smc_result;
3710 	u32 tmp, levels, i;
3711 	int ret;
3712 
3713 	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
3714 		if ((!pi->sclk_dpm_key_disabled) &&
3715 		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3716 			levels = 0;
3717 			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
3718 			while (tmp >>= 1)
3719 				levels++;
3720 			if (levels) {
3721 				ret = ci_dpm_force_state_sclk(rdev, levels);
3722 				if (ret)
3723 					return ret;
3724 				for (i = 0; i < rdev->usec_timeout; i++) {
3725 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3726 					       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3727 					if (tmp == levels)
3728 						break;
3729 					udelay(1);
3730 				}
3731 			}
3732 		}
3733 		if ((!pi->mclk_dpm_key_disabled) &&
3734 		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3735 			levels = 0;
3736 			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3737 			while (tmp >>= 1)
3738 				levels++;
3739 			if (levels) {
3740 				ret = ci_dpm_force_state_mclk(rdev, levels);
3741 				if (ret)
3742 					return ret;
3743 				for (i = 0; i < rdev->usec_timeout; i++) {
3744 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3745 					       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3746 					if (tmp == levels)
3747 						break;
3748 					udelay(1);
3749 				}
3750 			}
3751 		}
3752 		if ((!pi->pcie_dpm_key_disabled) &&
3753 		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3754 			levels = 0;
3755 			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
3756 			while (tmp >>= 1)
3757 				levels++;
3758 			if (levels) {
3759 				ret = ci_dpm_force_state_pcie(rdev, levels);
3760 				if (ret)
3761 					return ret;
3762 				for (i = 0; i < rdev->usec_timeout; i++) {
3763 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3764 					       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3765 					if (tmp == levels)
3766 						break;
3767 					udelay(1);
3768 				}
3769 			}
3770 		}
3771 	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
3772 		if ((!pi->sclk_dpm_key_disabled) &&
3773 		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3774 			levels = ci_get_lowest_enabled_level(rdev,
3775 							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3776 			ret = ci_dpm_force_state_sclk(rdev, levels);
3777 			if (ret)
3778 				return ret;
3779 			for (i = 0; i < rdev->usec_timeout; i++) {
3780 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3781 				       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3782 				if (tmp == levels)
3783 					break;
3784 				udelay(1);
3785 			}
3786 		}
3787 		if ((!pi->mclk_dpm_key_disabled) &&
3788 		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3789 			levels = ci_get_lowest_enabled_level(rdev,
3790 							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3791 			ret = ci_dpm_force_state_mclk(rdev, levels);
3792 			if (ret)
3793 				return ret;
3794 			for (i = 0; i < rdev->usec_timeout; i++) {
3795 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3796 				       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3797 				if (tmp == levels)
3798 					break;
3799 				udelay(1);
3800 			}
3801 		}
3802 		if ((!pi->pcie_dpm_key_disabled) &&
3803 		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3804 			levels = ci_get_lowest_enabled_level(rdev,
3805 							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3806 			ret = ci_dpm_force_state_pcie(rdev, levels);
3807 			if (ret)
3808 				return ret;
3809 			for (i = 0; i < rdev->usec_timeout; i++) {
3810 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3811 				       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3812 				if (tmp == levels)
3813 					break;
3814 				udelay(1);
3815 			}
3816 		}
3817 	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
3818 		if (!pi->sclk_dpm_key_disabled) {
3819 			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
3820 			if (smc_result != PPSMC_Result_OK)
3821 				return -EINVAL;
3822 		}
3823 		if (!pi->mclk_dpm_key_disabled) {
3824 			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
3825 			if (smc_result != PPSMC_Result_OK)
3826 				return -EINVAL;
3827 		}
3828 		if (!pi->pcie_dpm_key_disabled) {
3829 			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel);
3830 			if (smc_result != PPSMC_Result_OK)
3831 				return -EINVAL;
3832 		}
3833 	}
3834 
3835 	rdev->pm.dpm.forced_level = level;
3836 
3837 	return 0;
3838 }
3839 
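/*
 * Append derived entries to the MC register table: for the "special"
 * sequencer registers (MC_SEQ_MISC1, MC_SEQ_RESERVE_M) the per-level
 * MC_PMG_CMD_EMRS/MRS/MRS1 values are synthesized from the live
 * register contents combined with the VBIOS per-mclk data, with j
 * tracking the growing table tail.
 */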
3840 static int ci_set_mc_special_registers(struct radeon_device *rdev,
3841 				       struct ci_mc_reg_table *table)
3842 {
3843 	struct ci_power_info *pi = ci_get_pi(rdev);
3844 	u8 i, j, k;
3845 	u32 temp_reg;
3846 
3847 	for (i = 0, j = table->last; i < table->last; i++) {
3848 		if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3849 			return -EINVAL;
3850 		switch (table->mc_reg_address[i].s1 << 2) {
3851 		case MC_SEQ_MISC1:
3852 			temp_reg = RREG32(MC_PMG_CMD_EMRS);
3853 			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
3854 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3855 			for (k = 0; k < table->num_entries; k++) {
3856 				table->mc_reg_table_entry[k].mc_data[j] =
3857 					(temp_reg & 0xffff0000) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
3858 			}
3859 			j++;
3860 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3861 				return -EINVAL;
3862 
3863 			temp_reg = RREG32(MC_PMG_CMD_MRS);
3864 			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
3865 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3866 			for (k = 0; k < table->num_entries; k++) {
3867 				table->mc_reg_table_entry[k].mc_data[j] =
3868 					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3869 				if (!pi->mem_gddr5)
3870 					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
3871 			}
3872 			j++;
3873 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3874 				return -EINVAL;
3875 
3876 			if (!pi->mem_gddr5) {
3877 				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
3878 				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
3879 				for (k = 0; k < table->num_entries; k++) {
3880 					table->mc_reg_table_entry[k].mc_data[j] =
3881 						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
3882 				}
3883 				j++;
3884 				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3885 					return -EINVAL;
3886 			}
3887 			break;
3888 		case MC_SEQ_RESERVE_M:
3889 			temp_reg = RREG32(MC_PMG_CMD_MRS1);
3890 			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
3891 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3892 			for (k = 0; k < table->num_entries; k++) {
3893 				table->mc_reg_table_entry[k].mc_data[j] =
3894 					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3895 			}
3896 			j++;
3897 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3898 				return -EINVAL;
3899 			break;
3900 		default:
3901 			break;
3902 		}
3903 
3904 	}
3905 
3906 	table->last = j;
3907 
3908 	return 0;
3909 }
3910 
3911 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
3912 {
3913 	bool result = true;
3914 
3915 	switch (in_reg) {
3916 	case MC_SEQ_RAS_TIMING >> 2:
3917 		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
3918 		break;
3919 	case MC_SEQ_DLL_STBY >> 2:
3920 		*out_reg = MC_SEQ_DLL_STBY_LP >> 2;
3921 		break;
3922 	case MC_SEQ_G5PDX_CMD0 >> 2:
3923 		*out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
3924 		break;
3925 	case MC_SEQ_G5PDX_CMD1 >> 2:
3926 		*out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
3927 		break;
3928 	case MC_SEQ_G5PDX_CTRL >> 2:
3929 		*out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
3930 		break;
3931 	case MC_SEQ_CAS_TIMING >> 2:
3932 		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
3933 		break;
3934 	case MC_SEQ_MISC_TIMING >> 2:
3935 		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
3936 		break;
3937 	case MC_SEQ_MISC_TIMING2 >> 2:
3938 		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
3939 		break;
3940 	case MC_SEQ_PMG_DVS_CMD >> 2:
3941 		*out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
3942 		break;
3943 	case MC_SEQ_PMG_DVS_CTL >> 2:
3944 		*out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
3945 		break;
3946 	case MC_SEQ_RD_CTL_D0 >> 2:
3947 		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
3948 		break;
3949 	case MC_SEQ_RD_CTL_D1 >> 2:
3950 		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
3951 		break;
3952 	case MC_SEQ_WR_CTL_D0 >> 2:
3953 		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
3954 		break;
3955 	case MC_SEQ_WR_CTL_D1 >> 2:
3956 		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
3957 		break;
3958 	case MC_PMG_CMD_EMRS >> 2:
3959 		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3960 		break;
3961 	case MC_PMG_CMD_MRS >> 2:
3962 		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3963 		break;
3964 	case MC_PMG_CMD_MRS1 >> 2:
3965 		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3966 		break;
3967 	case MC_SEQ_PMG_TIMING >> 2:
3968 		*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
3969 		break;
3970 	case MC_PMG_CMD_MRS2 >> 2:
3971 		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
3972 		break;
3973 	case MC_SEQ_WR_CTL_2 >> 2:
3974 		*out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
3975 		break;
3976 	default:
3977 		result = false;
3978 		break;
3979 	}
3980 
3981 	return result;
3982 }
3983 
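/*
 * Set a bit in valid_flag for each register column whose value actually
 * differs between mclk levels; constant columns are dropped when the
 * table is packed for the SMC.
 */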
3984 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
3985 {
3986 	u8 i, j;
3987 
3988 	for (i = 0; i < table->last; i++) {
3989 		for (j = 1; j < table->num_entries; j++) {
3990 			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
3991 			    table->mc_reg_table_entry[j].mc_data[i]) {
3992 				table->valid_flag |= 1 << i;
3993 				break;
3994 			}
3995 		}
3996 	}
3997 }
3998 
3999 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4000 {
4001 	u32 i;
4002 	u16 address;
4003 
4004 	for (i = 0; i < table->last; i++) {
4005 		table->mc_reg_address[i].s0 =
4006 			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4007 			address : table->mc_reg_address[i].s1;
4008 	}
4009 }
4010 
4011 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4012 				      struct ci_mc_reg_table *ci_table)
4013 {
4014 	u8 i, j;
4015 
4016 	if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4017 		return -EINVAL;
4018 	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4019 		return -EINVAL;
4020 
4021 	for (i = 0; i < table->last; i++)
4022 		ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4023 
4024 	ci_table->last = table->last;
4025 
4026 	for (i = 0; i < table->num_entries; i++) {
4027 		ci_table->mc_reg_table_entry[i].mclk_max =
4028 			table->mc_reg_table_entry[i].mclk_max;
4029 		for (j = 0; j < table->last; j++)
4030 			ci_table->mc_reg_table_entry[i].mc_data[j] =
4031 				table->mc_reg_table_entry[i].mc_data[j];
4032 	}
4033 	ci_table->num_entries = table->num_entries;
4034 
4035 	return 0;
4036 }
4037 
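/*
 * Build the driver's MC register table: seed the _LP shadow registers
 * from the live sequencer state, pull the per-module AC timing table
 * from the VBIOS, then derive the shadow indices, special registers
 * and valid-column mask using the helpers above.
 */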
4038 static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
4039 {
4040 	struct ci_power_info *pi = ci_get_pi(rdev);
4041 	struct atom_mc_reg_table *table;
4042 	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4043 	u8 module_index = rv770_get_memory_module_index(rdev);
4044 	int ret;
4045 
4046 	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4047 	if (!table)
4048 		return -ENOMEM;
4049 
4050 	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
4051 	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
4052 	WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
4053 	WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
4054 	WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
4055 	WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
4056 	WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
4057 	WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
4058 	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
4059 	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
4060 	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
4061 	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
4062 	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
4063 	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
4064 	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
4065 	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
4066 	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
4067 	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
4068 	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
4069 	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
4070 
4071 	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
4072 	if (ret)
4073 		goto init_mc_done;
4074 
4075 	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4076 	if (ret)
4077 		goto init_mc_done;
4078 
4079 	ci_set_s0_mc_reg_index(ci_table);
4080 
4081 	ret = ci_set_mc_special_registers(rdev, ci_table);
4082 	if (ret)
4083 		goto init_mc_done;
4084 
4085 	ci_set_valid_flag(ci_table);
4086 
4087 init_mc_done:
4088 	kfree(table);
4089 
4090 	return ret;
4091 }
4092 
4093 static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
4094 					SMU7_Discrete_MCRegisters *mc_reg_table)
4095 {
4096 	struct ci_power_info *pi = ci_get_pi(rdev);
4097 	u32 i, j;
4098 
4099 	for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4100 		if (pi->mc_reg_table.valid_flag & (1 << j)) {
4101 			if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4102 				return -EINVAL;
4103 			mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4104 			mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4105 			i++;
4106 		}
4107 	}
4108 
4109 	mc_reg_table->last = (u8)i;
4110 
4111 	return 0;
4112 }
4113 
4114 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4115 				    SMU7_Discrete_MCRegisterSet *data,
4116 				    u32 num_entries, u32 valid_flag)
4117 {
4118 	u32 i, j;
4119 
4120 	for (i = 0, j = 0; j < num_entries; j++) {
4121 		if (valid_flag & (1 << j)) {
4122 			data->value[i] = cpu_to_be32(entry->mc_data[j]);
4123 			i++;
4124 		}
4125 	}
4126 }
4127 
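/*
 * Pick the first MC register entry whose mclk_max covers the requested
 * memory clock (or the last entry if the clock exceeds them all) and
 * pack its changing columns for the SMC.
 */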
4128 static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
4129 						 const u32 memory_clock,
4130 						 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4131 {
4132 	struct ci_power_info *pi = ci_get_pi(rdev);
4133 	u32 i = 0;
4134 
4135 	for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
4136 		if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4137 			break;
4138 	}
4139 
4140 	if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4141 		--i;
4142 
4143 	ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4144 				mc_reg_table_data, pi->mc_reg_table.last,
4145 				pi->mc_reg_table.valid_flag);
4146 }
4147 
4148 static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
4149 					   SMU7_Discrete_MCRegisters *mc_reg_table)
4150 {
4151 	struct ci_power_info *pi = ci_get_pi(rdev);
4152 	u32 i;
4153 
4154 	for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4155 		ci_convert_mc_reg_table_entry_to_smc(rdev,
4156 						     pi->dpm_table.mclk_table.dpm_levels[i].value,
4157 						     &mc_reg_table->data[i]);
4158 }
4159 
4160 static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
4161 {
4162 	struct ci_power_info *pi = ci_get_pi(rdev);
4163 	int ret;
4164 
4165 	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4166 
4167 	ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
4168 	if (ret)
4169 		return ret;
4170 	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4171 
4172 	return ci_copy_bytes_to_smc(rdev,
4173 				    pi->mc_reg_table_start,
4174 				    (u8 *)&pi->smc_mc_reg_table,
4175 				    sizeof(SMU7_Discrete_MCRegisters),
4176 				    pi->sram_end);
4177 }
4178 
4179 static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
4180 {
4181 	struct ci_power_info *pi = ci_get_pi(rdev);
4182 
4183 	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4184 		return 0;
4185 
4186 	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4187 
4188 	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4189 
4190 	return ci_copy_bytes_to_smc(rdev,
4191 				    pi->mc_reg_table_start +
4192 				    offsetof(SMU7_Discrete_MCRegisters, data[0]),
4193 				    (u8 *)&pi->smc_mc_reg_table.data[0],
4194 				    sizeof(SMU7_Discrete_MCRegisterSet) *
4195 				    pi->dpm_table.mclk_table.count,
4196 				    pi->sram_end);
4197 }
4198 
4199 static void ci_enable_voltage_control(struct radeon_device *rdev)
4200 {
4201 	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
4202 
4203 	tmp |= VOLT_PWRMGT_EN;
4204 	WREG32_SMC(GENERAL_PWRMGT, tmp);
4205 }
4206 
4207 static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
4208 						      struct radeon_ps *radeon_state)
4209 {
4210 	struct ci_ps *state = ci_get_ps(radeon_state);
4211 	int i;
4212 	u16 pcie_speed, max_speed = 0;
4213 
4214 	for (i = 0; i < state->performance_level_count; i++) {
4215 		pcie_speed = state->performance_levels[i].pcie_gen;
4216 		if (max_speed < pcie_speed)
4217 			max_speed = pcie_speed;
4218 	}
4219 
4220 	return max_speed;
4221 }
4222 
4223 static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
4224 {
4225 	u32 speed_cntl = 0;
4226 
4227 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
4228 	speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
4229 
4230 	return (u16)speed_cntl;
4231 }
4232 
4233 static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
4234 {
4235 	u32 link_width = 0;
4236 
4237 	link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
4238 	link_width >>= LC_LINK_WIDTH_RD_SHIFT;
4239 
4240 	switch (link_width) {
4241 	case RADEON_PCIE_LC_LINK_WIDTH_X1:
4242 		return 1;
4243 	case RADEON_PCIE_LC_LINK_WIDTH_X2:
4244 		return 2;
4245 	case RADEON_PCIE_LC_LINK_WIDTH_X4:
4246 		return 4;
4247 	case RADEON_PCIE_LC_LINK_WIDTH_X8:
4248 		return 8;
4249 	case RADEON_PCIE_LC_LINK_WIDTH_X12:
4250 		/* not actually supported */
4251 		return 12;
4252 	case RADEON_PCIE_LC_LINK_WIDTH_X0:
4253 	case RADEON_PCIE_LC_LINK_WIDTH_X16:
4254 	default:
4255 		return 16;
4256 	}
4257 }
4258 
4259 static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
4260 							     struct radeon_ps *radeon_new_state,
4261 							     struct radeon_ps *radeon_current_state)
4262 {
4263 	struct ci_power_info *pi = ci_get_pi(rdev);
4264 	enum radeon_pcie_gen target_link_speed =
4265 		ci_get_maximum_link_speed(rdev, radeon_new_state);
4266 	enum radeon_pcie_gen current_link_speed;
4267 
4268 	if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
4269 		current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
4270 	else
4271 		current_link_speed = pi->force_pcie_gen;
4272 
4273 	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
4274 	pi->pspp_notify_required = false;
4275 	if (target_link_speed > current_link_speed) {
4276 		switch (target_link_speed) {
4277 #ifdef CONFIG_ACPI
4278 		case RADEON_PCIE_GEN3:
4279 			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
4280 				break;
4281 			pi->force_pcie_gen = RADEON_PCIE_GEN2;
4282 			if (current_link_speed == RADEON_PCIE_GEN2)
4283 				break;
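			/* fall through: retry with a gen2 request */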
4284 		case RADEON_PCIE_GEN2:
4285 			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
4286 				break;
4287 #endif
4288 		default:
4289 			pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
4290 			break;
4291 		}
4292 	} else {
4293 		if (target_link_speed < current_link_speed)
4294 			pi->pspp_notify_required = true;
4295 	}
4296 }
4297 
4298 static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
4299 							   struct radeon_ps *radeon_new_state,
4300 							   struct radeon_ps *radeon_current_state)
4301 {
4302 	struct ci_power_info *pi = ci_get_pi(rdev);
4303 	enum radeon_pcie_gen target_link_speed =
4304 		ci_get_maximum_link_speed(rdev, radeon_new_state);
4305 	u8 request;
4306 
4307 	if (pi->pspp_notify_required) {
4308 		if (target_link_speed == RADEON_PCIE_GEN3)
4309 			request = PCIE_PERF_REQ_PECI_GEN3;
4310 		else if (target_link_speed == RADEON_PCIE_GEN2)
4311 			request = PCIE_PERF_REQ_PECI_GEN2;
4312 		else
4313 			request = PCIE_PERF_REQ_PECI_GEN1;
4314 
4315 		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
4316 		    (ci_get_current_pcie_speed(rdev) > 0))
4317 			return;
4318 
4319 #ifdef CONFIG_ACPI
4320 		radeon_acpi_pcie_performance_request(rdev, request, false);
4321 #endif
4322 	}
4323 }
4324 
4325 static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4326 {
4327 	struct ci_power_info *pi = ci_get_pi(rdev);
4328 	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4329 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4330 	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4331 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4332 	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4333 		&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4334 
4335 	if (allowed_sclk_vddc_table == NULL)
4336 		return -EINVAL;
4337 	if (allowed_sclk_vddc_table->count < 1)
4338 		return -EINVAL;
4339 	if (allowed_mclk_vddc_table == NULL)
4340 		return -EINVAL;
4341 	if (allowed_mclk_vddc_table->count < 1)
4342 		return -EINVAL;
4343 	if (allowed_mclk_vddci_table == NULL)
4344 		return -EINVAL;
4345 	if (allowed_mclk_vddci_table->count < 1)
4346 		return -EINVAL;
4347 
4348 	pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4349 	pi->max_vddc_in_pp_table =
4350 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4351 
4352 	pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4353 	pi->max_vddci_in_pp_table =
4354 		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4355 
4356 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4357 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4358 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
4359 		allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
4360 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4361 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4362 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4363 		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4364 
4365 	return 0;
4366 }
4367 
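/*
 * VBIOS tables may carry a leakage "virtual voltage" ID in place of a
 * real VDDC/VDDCI value; substitute the measured actual voltage so the
 * rest of the driver only ever sees real voltages.
 */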
4368 static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
4369 {
4370 	struct ci_power_info *pi = ci_get_pi(rdev);
4371 	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
4372 	u32 leakage_index;
4373 
4374 	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4375 		if (leakage_table->leakage_id[leakage_index] == *vddc) {
4376 			*vddc = leakage_table->actual_voltage[leakage_index];
4377 			break;
4378 		}
4379 	}
4380 }
4381 
4382 static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
4383 {
4384 	struct ci_power_info *pi = ci_get_pi(rdev);
4385 	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
4386 	u32 leakage_index;
4387 
4388 	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4389 		if (leakage_table->leakage_id[leakage_index] == *vddci) {
4390 			*vddci = leakage_table->actual_voltage[leakage_index];
4391 			break;
4392 		}
4393 	}
4394 }
4395 
4396 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4397 								      struct radeon_clock_voltage_dependency_table *table)
4398 {
4399 	u32 i;
4400 
4401 	if (table) {
4402 		for (i = 0; i < table->count; i++)
4403 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4404 	}
4405 }
4406 
4407 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
4408 								       struct radeon_clock_voltage_dependency_table *table)
4409 {
4410 	u32 i;
4411 
4412 	if (table) {
4413 		for (i = 0; i < table->count; i++)
4414 			ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
4415 	}
4416 }
4417 
4418 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4419 									  struct radeon_vce_clock_voltage_dependency_table *table)
4420 {
4421 	u32 i;
4422 
4423 	if (table) {
4424 		for (i = 0; i < table->count; i++)
4425 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4426 	}
4427 }
4428 
4429 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4430 									  struct radeon_uvd_clock_voltage_dependency_table *table)
4431 {
4432 	u32 i;
4433 
4434 	if (table) {
4435 		for (i = 0; i < table->count; i++)
4436 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4437 	}
4438 }
4439 
4440 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
4441 								   struct radeon_phase_shedding_limits_table *table)
4442 {
4443 	u32 i;
4444 
4445 	if (table) {
4446 		for (i = 0; i < table->count; i++)
4447 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
4448 	}
4449 }
4450 
4451 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
4452 							    struct radeon_clock_and_voltage_limits *table)
4453 {
4454 	if (table) {
4455 		ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
4456 		ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
4457 	}
4458 }
4459 
4460 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
4461 							 struct radeon_cac_leakage_table *table)
4462 {
4463 	u32 i;
4464 
4465 	if (table) {
4466 		for (i = 0; i < table->count; i++)
4467 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
4468 	}
4469 }
4470 
4471 static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
4472 {
4473 
4474 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4475 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
4476 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4477 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
4478 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4479 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
4480 	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
4481 								   &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
4482 	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4483 								      &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
4484 	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4485 								      &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
4486 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4487 								  &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
4488 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4489 								  &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
4490 	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
4491 							       &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
4492 	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4493 							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
4494 	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4495 							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
4496 	ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
4497 						     &rdev->pm.dpm.dyn_state.cac_leakage_table);
4498 
4499 }
4500 
4501 static void ci_get_memory_type(struct radeon_device *rdev)
4502 {
4503 	struct ci_power_info *pi = ci_get_pi(rdev);
4504 	u32 tmp;
4505 
4506 	tmp = RREG32(MC_SEQ_MISC0);
4507 
4508 	if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
4509 	    MC_SEQ_MISC0_GDDR5_VALUE)
4510 		pi->mem_gddr5 = true;
4511 	else
4512 		pi->mem_gddr5 = false;
4513 
4514 }
4515 
4516 static void ci_update_current_ps(struct radeon_device *rdev,
4517 				 struct radeon_ps *rps)
4518 {
4519 	struct ci_ps *new_ps = ci_get_ps(rps);
4520 	struct ci_power_info *pi = ci_get_pi(rdev);
4521 
4522 	pi->current_rps = *rps;
4523 	pi->current_ps = *new_ps;
4524 	pi->current_rps.ps_priv = &pi->current_ps;
4525 }
4526 
4527 static void ci_update_requested_ps(struct radeon_device *rdev,
4528 				   struct radeon_ps *rps)
4529 {
4530 	struct ci_ps *new_ps = ci_get_ps(rps);
4531 	struct ci_power_info *pi = ci_get_pi(rdev);
4532 
4533 	pi->requested_rps = *rps;
4534 	pi->requested_ps = *new_ps;
4535 	pi->requested_rps.ps_priv = &pi->requested_ps;
4536 }
4537 
4538 int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
4539 {
4540 	struct ci_power_info *pi = ci_get_pi(rdev);
4541 	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
4542 	struct radeon_ps *new_ps = &requested_ps;
4543 
4544 	ci_update_requested_ps(rdev, new_ps);
4545 
4546 	ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
4547 
4548 	return 0;
4549 }
4550 
4551 void ci_dpm_post_set_power_state(struct radeon_device *rdev)
4552 {
4553 	struct ci_power_info *pi = ci_get_pi(rdev);
4554 	struct radeon_ps *new_ps = &pi->requested_rps;
4555 
4556 	ci_update_current_ps(rdev, new_ps);
4557 }
4558 
4559 
4560 void ci_dpm_setup_asic(struct radeon_device *rdev)
4561 {
4562 	int r;
4563 
4564 	r = ci_mc_load_microcode(rdev);
4565 	if (r)
4566 		DRM_ERROR("Failed to load MC firmware!\n");
4567 	ci_read_clock_registers(rdev);
4568 	ci_get_memory_type(rdev);
4569 	ci_enable_acpi_power_management(rdev);
4570 	ci_init_sclk_t(rdev);
4571 }
4572 
4573 int ci_dpm_enable(struct radeon_device *rdev)
4574 {
4575 	struct ci_power_info *pi = ci_get_pi(rdev);
4576 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4577 	int ret;
4578 
4579 	if (ci_is_smc_running(rdev))
4580 		return -EINVAL;
4581 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
4582 		ci_enable_voltage_control(rdev);
4583 		ret = ci_construct_voltage_tables(rdev);
4584 		if (ret) {
4585 			DRM_ERROR("ci_construct_voltage_tables failed\n");
4586 			return ret;
4587 		}
4588 	}
4589 	if (pi->caps_dynamic_ac_timing) {
4590 		ret = ci_initialize_mc_reg_table(rdev);
4591 		if (ret)
4592 			pi->caps_dynamic_ac_timing = false;
4593 	}
4594 	if (pi->dynamic_ss)
4595 		ci_enable_spread_spectrum(rdev, true);
4596 	if (pi->thermal_protection)
4597 		ci_enable_thermal_protection(rdev, true);
4598 	ci_program_sstp(rdev);
4599 	ci_enable_display_gap(rdev);
4600 	ci_program_vc(rdev);
4601 	ret = ci_upload_firmware(rdev);
4602 	if (ret) {
4603 		DRM_ERROR("ci_upload_firmware failed\n");
4604 		return ret;
4605 	}
4606 	ret = ci_process_firmware_header(rdev);
4607 	if (ret) {
4608 		DRM_ERROR("ci_process_firmware_header failed\n");
4609 		return ret;
4610 	}
4611 	ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
4612 	if (ret) {
4613 		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
4614 		return ret;
4615 	}
4616 	ret = ci_init_smc_table(rdev);
4617 	if (ret) {
4618 		DRM_ERROR("ci_init_smc_table failed\n");
4619 		return ret;
4620 	}
4621 	ret = ci_init_arb_table_index(rdev);
4622 	if (ret) {
4623 		DRM_ERROR("ci_init_arb_table_index failed\n");
4624 		return ret;
4625 	}
4626 	if (pi->caps_dynamic_ac_timing) {
4627 		ret = ci_populate_initial_mc_reg_table(rdev);
4628 		if (ret) {
4629 			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
4630 			return ret;
4631 		}
4632 	}
4633 	ret = ci_populate_pm_base(rdev);
4634 	if (ret) {
4635 		DRM_ERROR("ci_populate_pm_base failed\n");
4636 		return ret;
4637 	}
4638 	ci_dpm_start_smc(rdev);
4639 	ci_enable_vr_hot_gpio_interrupt(rdev);
4640 	ret = ci_notify_smc_display_change(rdev, false);
4641 	if (ret) {
4642 		DRM_ERROR("ci_notify_smc_display_change failed\n");
4643 		return ret;
4644 	}
4645 	ci_enable_sclk_control(rdev, true);
4646 	ret = ci_enable_ulv(rdev, true);
4647 	if (ret) {
4648 		DRM_ERROR("ci_enable_ulv failed\n");
4649 		return ret;
4650 	}
4651 	ret = ci_enable_ds_master_switch(rdev, true);
4652 	if (ret) {
4653 		DRM_ERROR("ci_enable_ds_master_switch failed\n");
4654 		return ret;
4655 	}
4656 	ret = ci_start_dpm(rdev);
4657 	if (ret) {
4658 		DRM_ERROR("ci_start_dpm failed\n");
4659 		return ret;
4660 	}
4661 	ret = ci_enable_didt(rdev, true);
4662 	if (ret) {
4663 		DRM_ERROR("ci_enable_didt failed\n");
4664 		return ret;
4665 	}
4666 	ret = ci_enable_smc_cac(rdev, true);
4667 	if (ret) {
4668 		DRM_ERROR("ci_enable_smc_cac failed\n");
4669 		return ret;
4670 	}
4671 	ret = ci_enable_power_containment(rdev, true);
4672 	if (ret) {
4673 		DRM_ERROR("ci_enable_power_containment failed\n");
4674 		return ret;
4675 	}
4676 
4677 	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
4678 
4679 	ci_update_current_ps(rdev, boot_ps);
4680 
4681 	return 0;
4682 }
4683 
4684 int ci_dpm_late_enable(struct radeon_device *rdev)
4685 {
4686 	int ret;
4687 
4688 	if (rdev->irq.installed &&
4689 	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
4690 #if 0
4691 		PPSMC_Result result;
4692 #endif
4693 		ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
4694 		if (ret) {
4695 			DRM_ERROR("ci_set_thermal_temperature_range failed\n");
4696 			return ret;
4697 		}
4698 		rdev->irq.dpm_thermal = true;
4699 		radeon_irq_set(rdev);
4700 #if 0
4701 		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
4702 
4703 		if (result != PPSMC_Result_OK)
4704 			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
4705 #endif
4706 	}
4707 
4708 	ci_dpm_powergate_uvd(rdev, true);
4709 
4710 	return 0;
4711 }
4712 
4713 void ci_dpm_disable(struct radeon_device *rdev)
4714 {
4715 	struct ci_power_info *pi = ci_get_pi(rdev);
4716 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4717 
4718 	ci_dpm_powergate_uvd(rdev, false);
4719 
4720 	if (!ci_is_smc_running(rdev))
4721 		return;
4722 
4723 	if (pi->thermal_protection)
4724 		ci_enable_thermal_protection(rdev, false);
4725 	ci_enable_power_containment(rdev, false);
4726 	ci_enable_smc_cac(rdev, false);
4727 	ci_enable_didt(rdev, false);
4728 	ci_enable_spread_spectrum(rdev, false);
4729 	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
4730 	ci_stop_dpm(rdev);
4731 	ci_enable_ds_master_switch(rdev, true);
4732 	ci_enable_ulv(rdev, false);
4733 	ci_clear_vc(rdev);
4734 	ci_reset_to_default(rdev);
4735 	ci_dpm_stop_smc(rdev);
4736 	ci_force_switch_to_arb_f0(rdev);
4737 
4738 	ci_update_current_ps(rdev, boot_ps);
4739 }
4740 
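/*
 * The actual state switch: freeze sclk/mclk DPM, upload any changed
 * graphics/memory levels and MC registers, regenerate and upload the
 * level enable masks, then unfreeze; PCIE link-speed (PSPP) requests
 * bracket the sequence when pcie_performance_request is set.
 */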
4741 int ci_dpm_set_power_state(struct radeon_device *rdev)
4742 {
4743 	struct ci_power_info *pi = ci_get_pi(rdev);
4744 	struct radeon_ps *new_ps = &pi->requested_rps;
4745 	struct radeon_ps *old_ps = &pi->current_rps;
4746 	int ret;
4747 
4748 	ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
4749 	if (pi->pcie_performance_request)
4750 		ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
4751 	ret = ci_freeze_sclk_mclk_dpm(rdev);
4752 	if (ret) {
4753 		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
4754 		return ret;
4755 	}
4756 	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
4757 	if (ret) {
4758 		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
4759 		return ret;
4760 	}
4761 	ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
4762 	if (ret) {
4763 		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
4764 		return ret;
4765 	}
4766 
4767 	ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
4768 	if (ret) {
4769 		DRM_ERROR("ci_update_vce_dpm failed\n");
4770 		return ret;
4771 	}
4772 
4773 	ret = ci_update_sclk_t(rdev);
4774 	if (ret) {
4775 		DRM_ERROR("ci_update_sclk_t failed\n");
4776 		return ret;
4777 	}
4778 	if (pi->caps_dynamic_ac_timing) {
4779 		ret = ci_update_and_upload_mc_reg_table(rdev);
4780 		if (ret) {
4781 			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
4782 			return ret;
4783 		}
4784 	}
4785 	ret = ci_program_memory_timing_parameters(rdev);
4786 	if (ret) {
4787 		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
4788 		return ret;
4789 	}
4790 	ret = ci_unfreeze_sclk_mclk_dpm(rdev);
4791 	if (ret) {
4792 		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
4793 		return ret;
4794 	}
4795 	ret = ci_upload_dpm_level_enable_mask(rdev);
4796 	if (ret) {
4797 		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
4798 		return ret;
4799 	}
4800 	if (pi->pcie_performance_request)
4801 		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
4802 
4803 	return 0;
4804 }
4805 
4806 int ci_dpm_power_control_set_level(struct radeon_device *rdev)
4807 {
4808 	return ci_power_control_set_level(rdev);
4809 }
4810 
4811 void ci_dpm_reset_asic(struct radeon_device *rdev)
4812 {
4813 	ci_set_boot_state(rdev);
4814 }
4815 
4816 void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
4817 {
4818 	ci_program_display_gap(rdev);
4819 }
4820 
4821 union power_info {
4822 	struct _ATOM_POWERPLAY_INFO info;
4823 	struct _ATOM_POWERPLAY_INFO_V2 info_2;
4824 	struct _ATOM_POWERPLAY_INFO_V3 info_3;
4825 	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
4826 	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
4827 	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
4828 };
4829 
4830 union pplib_clock_info {
4831 	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
4832 	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
4833 	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
4834 	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
4835 	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
4836 	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
4837 };
4838 
4839 union pplib_power_state {
4840 	struct _ATOM_PPLIB_STATE v1;
4841 	struct _ATOM_PPLIB_STATE_V2 v2;
4842 };
4843 
4844 static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
4845 					  struct radeon_ps *rps,
4846 					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
4847 					  u8 table_rev)
4848 {
4849 	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
4850 	rps->class = le16_to_cpu(non_clock_info->usClassification);
4851 	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
4852 
4853 	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
4854 		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
4855 		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
4856 	} else {
4857 		rps->vclk = 0;
4858 		rps->dclk = 0;
4859 	}
4860 
4861 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
4862 		rdev->pm.dpm.boot_ps = rps;
4863 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
4864 		rdev->pm.dpm.uvd_ps = rps;
4865 }
4866 
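/*
 * ATOM stores each 24-bit clock as a 16-bit low word plus an 8-bit
 * high byte, e.g. sclk = usEngineClockLow | (ucEngineClockHigh << 16).
 * This also records the boot/ACPI/ULV levels and the PCIE gen/lane
 * ranges later used for PSPP requests.
 */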
static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl = &ps->performance_levels[index];

	ps->performance_level_count = index + 1;

	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;

	pl->pcie_gen = r600_get_pcie_gen_support(rdev,
						 pi->sys_pcie_mask,
						 pi->vbios_boot_state.pcie_gen_bootup_value,
						 clock_info->ci.ucPCIEGen);
	pl->pcie_lane = r600_get_pcie_lane_support(rdev,
						   pi->vbios_boot_state.pcie_lane_bootup_value,
						   le16_to_cpu(clock_info->ci.usPCIELane));

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
		pi->acpi_pcie_gen = pl->pcie_gen;

	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
		pi->ulv.supported = true;
		pi->ulv.pl = *pl;
		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
	}

	/* patch up boot state */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
	}

	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		pi->use_pcie_powersaving_levels = true;
		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
			pi->pcie_gen_powersaving.max = pl->pcie_gen;
		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
			pi->pcie_gen_powersaving.min = pl->pcie_gen;
		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
			pi->pcie_lane_powersaving.max = pl->pcie_lane;
		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
			pi->pcie_lane_powersaving.min = pl->pcie_lane;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		pi->use_pcie_performance_levels = true;
		if (pi->pcie_gen_performance.max < pl->pcie_gen)
			pi->pcie_gen_performance.max = pl->pcie_gen;
		if (pi->pcie_gen_performance.min > pl->pcie_gen)
			pi->pcie_gen_performance.min = pl->pcie_gen;
		if (pi->pcie_lane_performance.max < pl->pcie_lane)
			pi->pcie_lane_performance.max = pl->pcie_lane;
		if (pi->pcie_lane_performance.min > pl->pcie_lane)
			pi->pcie_lane_performance.min = pl->pcie_lane;
		break;
	default:
		break;
	}
}

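/*
 * Walk the pplib power-play table out of the VBIOS: allocate one
 * radeon_ps per state entry, parse its non-clock info, then parse up
 * to CISLANDS_MAX_HARDWARE_POWERLEVELS clock-info entries per state.
 */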
static int ci_parse_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct ci_ps *ps;
	int ret;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
				  sizeof(struct radeon_ps), GFP_KERNEL);
	if (!rdev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		if (!rdev->pm.power_state[i].clock_info) {
			ret = -EINVAL;
			goto err_free_ps;
		}
		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
		if (ps == NULL) {
			ret = -ENOMEM;
			goto err_free_ps;
		}
		rdev->pm.dpm.ps[i].ps_priv = ps;
		ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			ci_parse_pplib_clock_info(rdev,
						  &rdev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
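		/* v2 state entries are variable length: two header bytes
		 * (ucNumDPMLevels, nonClockInfoIndex) followed by one
		 * clock-info index byte per DPM level.
		 */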
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	rdev->pm.dpm.num_ps = state_array->ucNumEntries;

	/* fill in the vce power states */
	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
		u32 sclk, mclk;
		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
		sclk |= clock_info->ci.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
		rdev->pm.dpm.vce_states[i].sclk = sclk;
		rdev->pm.dpm.vce_states[i].mclk = mclk;
	}

	return 0;

err_free_ps:
	/* free everything allocated so far and NULL the array so that
	 * ci_dpm_fini() does not free it a second time
	 */
	for (i = 0; i < state_array->ucNumEntries; i++)
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	kfree(rdev->pm.dpm.ps);
	rdev->pm.dpm.ps = NULL;
	return ret;
}

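/*
 * Read the bootup voltages and default engine/memory clocks from the
 * VBIOS FirmwareInfo table, and snapshot the PCIe gen/lane settings
 * the card actually came up with.  These values seed the boot state.
 */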
static int ci_get_vbios_boot_values(struct radeon_device *rdev,
				    struct ci_vbios_boot_state *boot_state)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
	u8 frev, crev;
	u16 data_offset;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		firmware_info =
			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
						    data_offset);
		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);

		return 0;
	}
	return -EINVAL;
}

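/* Tear down everything ci_dpm_init() allocated; safe to call on a
 * partially initialized device since kfree(NULL) is a no-op.
 */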
void ci_dpm_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->pm.dpm.num_ps; i++)
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	kfree(rdev->pm.dpm.ps);
	kfree(rdev->pm.dpm.priv);
	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
	r600_free_extended_power_table(rdev);
}

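/*
 * One-time DPM setup: allocate the private ci_power_info, pull the
 * boot values and power tables out of the VBIOS, then fill in the
 * driver defaults (activity targets, mclk thresholds, thermal limits,
 * voltage control mode, spread spectrum caps).
 */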
int ci_dpm_init(struct radeon_device *rdev)
{
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	u16 data_offset, size;
	u8 frev, crev;
	struct ci_power_info *pi;
	int ret;
	u32 mask;

	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret)
		pi->sys_pcie_mask = 0;
	else
		pi->sys_pcie_mask = mask;
	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;

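	/* Seed the PCIe gen/lane trackers inverted (max low, min high) so
	 * the first state parsed establishes both bounds.
	 */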
	pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
	pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
	pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
	pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;

	pi->pcie_lane_performance.max = 0;
	pi->pcie_lane_performance.min = 16;
	pi->pcie_lane_powersaving.max = 0;
	pi->pcie_lane_powersaving.min = 16;

	ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = r600_get_platform_caps(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = r600_parse_extended_power_table(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = ci_parse_power_table(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	pi->dll_default_on = false;
	pi->sram_end = SMC_RAM_END;

	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;

	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;

	pi->sclk_dpm_key_disabled = 0;
	pi->mclk_dpm_key_disabled = 0;
	pi->pcie_dpm_key_disabled = 0;

	/* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
	if ((rdev->pdev->device == 0x6658) &&
	    (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) {
		pi->mclk_dpm_key_disabled = 1;
	}

	pi->caps_sclk_ds = true;

	pi->mclk_strobe_mode_threshold = 40000;
	pi->mclk_stutter_mode_threshold = 40000;
	pi->mclk_edc_enable_threshold = 40000;
	pi->mclk_edc_wr_enable_threshold = 40000;

	ci_initialize_powertune_defaults(rdev);

	pi->caps_fps = false;

	pi->caps_sclk_throttle_low_notification = false;

	pi->caps_uvd_dpm = true;
	pi->caps_vce_dpm = true;

	ci_get_leakage_voltages(rdev);
	ci_patch_dependency_tables_with_leakage(rdev);
	ci_set_private_data_variables_based_on_pptable(rdev);

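	/* Built-in dispclk -> vddc fallback table; per the usual radeon
	 * dpm convention the clocks should be in 10 kHz units and the
	 * voltages in mV, i.e. 36000/720 is 360 MHz at 0.72 V.
	 */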
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
		kcalloc(4, sizeof(struct radeon_clock_voltage_dependency_entry),
			GFP_KERNEL);
	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
		ci_dpm_fini(rdev);
		return -ENOMEM;
	}
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;

	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;

	rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
	rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;

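	/* Thermal limits are in millidegrees C: Hawaii starts throttling
	 * at 95 C, everything else at 100 C; shutdown is 104 C either way.
	 */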
	if (rdev->family == CHIP_HAWAII) {
		pi->thermal_temp_setting.temperature_low = 94500;
		pi->thermal_temp_setting.temperature_high = 95000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	} else {
		pi->thermal_temp_setting.temperature_low = 99500;
		pi->thermal_temp_setting.temperature_high = 100000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	}

	pi->uvd_enabled = false;

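	/* Work out how each voltage rail is driven: a GPIO lookup table
	 * or the SVI2 serial interface, falling back to no control (and
	 * clearing the platform cap) when neither is available.
	 */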
	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
	else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
	}

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
	}

	pi->vddc_phase_shed_control = true;

#if defined(CONFIG_ACPI)
	pi->pcie_performance_request =
		radeon_acpi_is_pcie_performance_request_supported(rdev);
#else
	pi->pcie_performance_request = false;
#endif

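	/* The presence of the ASIC_InternalSS_Info table is taken as the
	 * indicator that sclk/mclk spread spectrum is supported.
	 */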
	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		pi->caps_sclk_ss_support = true;
		pi->caps_mclk_ss_support = true;
		pi->dynamic_ss = true;
	} else {
		pi->caps_sclk_ss_support = false;
		pi->caps_mclk_ss_support = false;
		pi->dynamic_ss = true;
	}

	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	pi->caps_dynamic_ac_timing = true;

	pi->uvd_power_gated = false;

	/* make sure dc limits are valid */
	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	return 0;
}

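/* Report the current UVD/VCE state and the SMC's average sclk/mclk
 * through debugfs.
 */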
void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						    struct seq_file *m)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *rps = &pi->current_rps;
	u32 sclk = ci_get_average_sclk_freq(rdev);
	u32 mclk = ci_get_average_mclk_freq(rdev);

	seq_printf(m, "uvd    %sabled\n", pi->uvd_enabled ? "en" : "dis");
	seq_printf(m, "vce    %sabled\n", rps->vce_active ? "en" : "dis");
	seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
		   sclk, mclk);
}

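/* Dump a power state and each of its performance levels to the kernel
 * log; pcie_gen is stored zero-based, hence the +1 when printing.
 */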
void ci_dpm_print_power_state(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl;
	int i;

	r600_dpm_print_class_info(rps->class, rps->class2);
	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd    vclk: %u dclk: %u\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->performance_level_count; i++) {
		pl = &ps->performance_levels[i];
		printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
	}
	r600_dpm_print_ps_status(rdev, rps);
}

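/* Return the lowest or highest engine clock of the requested state. */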
u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].sclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}

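/* Return the lowest or highest memory clock of the requested state. */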
u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].mclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}