/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_ucode.h"
#include "cikd.h"
#include "r600_dpm.h"
#include "ci_dpm.h"
#include "atom.h"
#include <linux/seq_file.h>

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define SMC_RAM_END 0x40000

#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1    625
#define VOLTAGE_VID_OFFSET_SCALE2    100

static const struct ci_pt_defaults defaults_hawaii_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_bonaire_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};

static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_saturn_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};

static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
				       u32 arb_freq_src, u32 arb_freq_dest);
extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
						     u32 max_voltage_steps,
						     struct atom_voltage_table *voltage_table);
extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern int ci_mc_load_microcode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
			  u32 block, bool enable);

static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp);
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);

static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter);

static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
{
	struct ci_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
{
	struct ci_ps *ps = rps->ps_priv;

	return ps;
}

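/*
 * Pick the PowerTune defaults that match the PCI device ID (Bonaire,
 * Saturn or Hawaii variants); unrecognized IDs fall back to the
 * Bonaire XT table.  DiDt (SQ/DB/TD/TCP) ramping is left disabled
 * here.  Since power containment is always reported as supported,
 * CAC is enabled, BAPM is enabled on everything except Hawaii, and
 * the TDC limit and package power tracking features are turned on.
 */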
static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	switch (rdev->pdev->device) {
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x67B8:
	case 0x67B0:
		pi->powertune_defaults = &defaults_hawaii_xt;
		break;
	case 0x67BA:
	case 0x67B1:
		pi->powertune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	}

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		if (rdev->family == CHIP_HAWAII)
			pi->enable_bapm_feature = false;
		else
			pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}

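/*
 * Convert a VDDC value in mV to an 8-bit SMC VID code.  With
 * VOLTAGE_SCALE = 4 this works out to (1550 mV - vddc) / 6.25 mV,
 * i.e. an SVI2-style encoding counting 6.25 mV steps down from 1.55 V.
 */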
static u8 ci_convert_to_vid(u16 vddc)
{
	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

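/*
 * Fill the BapmVddCVid{Hi,Lo,Hi2}Sidd arrays of the SMC PowerTune
 * table from the CAC leakage table.  With EVV the table supplies
 * three explicit voltages per entry; otherwise the hi VID is derived
 * from the leakage value.  The leakage table must have at most 8
 * entries and match the vddc-vs-sclk dependency table in size.
 */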
static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}

static int ci_populate_vddc_vid(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *vid = pi->smc_powertune_table.VddCVid;
	u32 i;

	if (pi->vddc_voltage_table.count > 8)
		return -EINVAL;

	for (i = 0; i < pi->vddc_voltage_table.count; i++)
		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

	return 0;
}

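/* Program the SVI load-line parameters from the per-asic defaults. */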
static int ci_populate_svi_load_line(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int ci_populate_tdc_limit(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}

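/*
 * Populate the TdcWaterfallCtl fuse (the "dw8" in the name presumably
 * refers to its dword offset in the PM fuse table).  Note that the
 * value read back from SMC SRAM is only used as a presence check; on
 * success it is overwritten with the per-asic default.
 */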
static int ci_populate_dw8(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
				     pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_populate_fuzzy_fan(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
	    (rdev->pm.dpm.fan.fan_output_sensitivity == 0))
		rdev->pm.dpm.fan.fan_output_sensitivity =
			rdev->pm.dpm.fan.default_fan_output_sensitivity;

	pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
		cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity);

	return 0;
}

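/*
 * Derive the GnbLPML min/max VIDs from the non-zero entries of the
 * BapmVddCVid{Hi,Lo}Sidd arrays populated above.
 */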
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 hi_sidd, lo_sidd;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;

	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

	return 0;
}

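/*
 * Fill in the BAPM parameters of the SMC DPM table: the default and
 * target TDP (scaled by 256 for the SMC), the DTE thermal limits, the
 * optional PPM package power/temperature limits, and the BAPMTI_R/RC
 * thermal coupling matrices from the per-asic defaults.
 */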
static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

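/*
 * Locate the PM fuse table in SMC SRAM via the firmware header, fill
 * in all of its fields (VIDs, load line, TDC limit, fan, leakage) and
 * copy the result back to the SMC.  A failure in any step aborts the
 * whole sequence.
 */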
static int ci_populate_pm_base(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = ci_read_smc_sram_dword(rdev,
					     SMU7_FIRMWARE_HEADER_LOCATION +
					     offsetof(SMU7_Firmware_Header, PmFuseTable),
					     &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(rdev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(rdev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(rdev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(rdev);
		if (ret)
			return ret;
		ret = ci_populate_fuzzy_fan(rdev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
					   (u8 *)&pi->smc_powertune_table,
					   sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

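/*
 * Toggle the DIDT_CTRL_EN bit of each DiDt block (SQ/DB/TD/TCP)
 * whose ramping cap is enabled.
 */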
static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}

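/*
 * Program a 0xFFFFFFFF-terminated list of PowerTune config registers.
 * CACHE entries accumulate field values that are OR'ed into the next
 * real register write; SMC_IND/DIDT_IND entries go through the
 * indirect register spaces, anything else through normal MMIO (with
 * the offset given in dwords).
 */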
static int ci_program_pt_config_registers(struct radeon_device *rdev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}

static int ci_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = ci_program_pt_config_registers(rdev, didt_config_ci);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		ci_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}

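/*
 * Enable or disable the SMC power containment features (BAPM/DTE,
 * TDC limit, package power limit).  On enable, each feature that the
 * SMC accepts is recorded in power_containment_features and the
 * default package power limit is programmed; on disable, only the
 * previously enabled features are torn down.
 */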
static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct radeon_cac_tdp_table *cac_tdp_table =
						rdev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(rdev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}

static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
			if (smc_result != PPSMC_Result_OK) {
				ret = -EINVAL;
				pi->cac_enabled = false;
			} else {
				pi->cac_enabled = true;
			}
		} else if (pi->cac_enabled) {
			ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev,
					    bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result = PPSMC_Result_OK;

	if (pi->thermal_sclk_dpm_enabled) {
		if (enable)
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_ENABLE_THERMAL_DPM);
		else
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DISABLE_THERMAL_DPM);
	}

	if (smc_result == PPSMC_Result_OK)
		return 0;
	else
		return -EINVAL;
}

static int ci_power_control_set_level(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment) {
		adjust_percent = adjust_polarity ?
			rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;

		ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
	}

	return ret;
}

void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	ci_update_uvd_dpm(rdev, gate);
}

bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;

	if (vblank_time < switch_limit)
		return true;
	else
		return false;
}

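/*
 * Adjust the requested power state before programming it: pick up the
 * VCE clocks if VCE is active, disable mclk switching when more than
 * one CRTC is active or the vblank is too short for a switch, clamp
 * the clocks to the DC limits when on battery, and raise the low
 * levels where needed so the performance levels stay monotonic.
 */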
static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	if (rps->vce_active) {
		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
		rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(rdev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (!rdev->pm.dpm.ac_power) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	if (rps->vce_active) {
		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
		if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
			mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}

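/*
 * Clamp the requested thermal range to 0..255 C and program the high
 * and low thermal interrupt thresholds (in degrees C, from values
 * given in millidegrees).
 */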
static int ci_thermal_set_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT);
	tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
	tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
		CI_DIG_THERM_INTL(low_temp / 1000);
	WREG32_SMC(CG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(CG_THERMAL_CTRL);
	tmp &= ~DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

static int ci_thermal_enable_alert(struct radeon_device *rdev,
				   bool enable)
{
	u32 thermal_int = RREG32_SMC(CG_THERMAL_INT);
	PPSMC_Result result;

	if (enable) {
		thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
		WREG32_SMC(CG_THERMAL_INT, thermal_int);
		rdev->irq.dpm_thermal = false;
		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
			return -EINVAL;
		}
	} else {
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
		WREG32_SMC(CG_THERMAL_INT, thermal_int);
		rdev->irq.dpm_thermal = true;
		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (pi->fan_ctrl_is_in_default_mode) {
		tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
		pi->fan_ctrl_default_mode = tmp;
		tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
		pi->t_min = tmp;
		pi->fan_ctrl_is_in_default_mode = false;
	}

	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
	tmp |= TMIN(0);
	WREG32_SMC(CG_FDO_CTRL2, tmp);

	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
	tmp |= FDO_PWM_MODE(mode);
	WREG32_SMC(CG_FDO_CTRL2, tmp);
}

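/*
 * Build the SMC fan table from the fan profile in the power tables:
 * convert the PWM minimum into a duty cycle, compute the two
 * temperature/PWM slopes, and upload the table to SMC SRAM.  On any
 * setup problem the driver falls back to disabling ucode fan control.
 */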
static int ci_thermal_setup_fan_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	int ret;
	u64 tmp64;

	if (!pi->fan_table_start) {
		rdev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;

	if (duty100 == 0) {
		rdev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
	t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;

	pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
	pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;

	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
	fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
	fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = radeon_get_xclk(rdev);

	fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
					       reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((u16)duty100);

	tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
	fan_table.TempSrc = (uint8_t)tmp;

	ret = ci_copy_bytes_to_smc(rdev,
				   pi->fan_table_start,
				   (u8 *)(&fan_table),
				   sizeof(fan_table),
				   pi->sram_end);

	if (ret) {
		DRM_ERROR("Failed to load fan table to the SMC.\n");
		rdev->pm.dpm.fan.ucode_fan_control = false;
	}

	return 0;
}

static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result ret;

	if (pi->caps_od_fuzzy_fan_control_support) {
		ret = ci_send_msg_to_smc_with_parameter(rdev,
							PPSMC_StartFanControl,
							FAN_CONTROL_FUZZY);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
		ret = ci_send_msg_to_smc_with_parameter(rdev,
							PPSMC_MSG_SetFanPwmMax,
							rdev->pm.dpm.fan.default_max_fan_pwm);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	} else {
		ret = ci_send_msg_to_smc_with_parameter(rdev,
							PPSMC_StartFanControl,
							FAN_CONTROL_TABLE);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

#if 0
static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
{
	PPSMC_Result ret;

	ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl);
	if (ret == PPSMC_Result_OK)
		return 0;
	else
		return -EINVAL;
}

static int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
					     u32 *speed)
{
	u32 duty, duty100;
	u64 tmp64;

	if (rdev->pm.no_fan)
		return -ENOENT;

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
	duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)duty * 100;
	do_div(tmp64, duty100);
	*speed = (u32)tmp64;

	if (*speed > 100)
		*speed = 100;

	return 0;
}

static int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
					     u32 speed)
{
	u32 tmp;
	u32 duty, duty100;
	u64 tmp64;

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (speed > 100)
		return -EINVAL;

	if (rdev->pm.dpm.fan.ucode_fan_control)
		ci_fan_ctrl_stop_smc_fan_control(rdev);

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)speed * duty100;
	do_div(tmp64, 100);
	duty = (u32)tmp64;

	tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
	tmp |= FDO_STATIC_DUTY(duty);
	WREG32_SMC(CG_FDO_CTRL0, tmp);

	ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);

	return 0;
}

static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
					 u32 *speed)
{
	u32 tach_period;
	u32 xclk = radeon_get_xclk(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (rdev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
	if (tach_period == 0)
		return -ENOENT;

	*speed = 60 * xclk * 10000 / tach_period;

	return 0;
}

static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
					 u32 speed)
{
	u32 tach_period, tmp;
	u32 xclk = radeon_get_xclk(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (rdev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	if ((speed < rdev->pm.fan_min_rpm) ||
	    (speed > rdev->pm.fan_max_rpm))
		return -EINVAL;

	if (rdev->pm.dpm.fan.ucode_fan_control)
		ci_fan_ctrl_stop_smc_fan_control(rdev);

	tach_period = 60 * xclk * 10000 / (8 * speed);
	tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
	tmp |= TARGET_PERIOD(tach_period);
	WREG32_SMC(CG_TACH_CTRL, tmp);

	ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);

	return 0;
}
#endif

static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (!pi->fan_ctrl_is_in_default_mode) {
		tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
		tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
		WREG32_SMC(CG_FDO_CTRL2, tmp);

		tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
		tmp |= TMIN(pi->t_min);
		WREG32_SMC(CG_FDO_CTRL2, tmp);
		pi->fan_ctrl_is_in_default_mode = true;
	}
}

static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev)
{
	if (rdev->pm.dpm.fan.ucode_fan_control) {
		ci_fan_ctrl_start_smc_fan_control(rdev);
		ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
	}
}

static void ci_thermal_initialize(struct radeon_device *rdev)
{
	u32 tmp;

	if (rdev->pm.fan_pulses_per_revolution) {
		tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
		tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
		WREG32_SMC(CG_TACH_CTRL, tmp);
	}

	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
	tmp |= TACH_PWM_RESP_RATE(0x28);
	WREG32_SMC(CG_FDO_CTRL2, tmp);
}

static int ci_thermal_start_thermal_controller(struct radeon_device *rdev)
{
	int ret;

	ci_thermal_initialize(rdev);
	ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = ci_thermal_enable_alert(rdev, true);
	if (ret)
		return ret;
	if (rdev->pm.dpm.fan.ucode_fan_control) {
		ret = ci_thermal_setup_fan_table(rdev);
		if (ret)
			return ret;
		ci_thermal_start_smc_fan_control(rdev);
	}

	return 0;
}

static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev)
{
	if (!rdev->pm.no_fan)
		ci_fan_ctrl_set_default_mode(rdev);
}

#if 0
static int ci_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_read_smc_sram_dword(rdev,
				      pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif

static int ci_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_write_smc_sram_dword(rdev,
				       pi->soft_regs_start + reg_offset,
				       value, pi->sram_end);
}

static void ci_init_fps_limits(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		table->FpsHighT = cpu_to_be16(tmp);

		tmp = 30;
		table->FpsLowT = cpu_to_be16(tmp);
	}
}

static int ci_update_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;
	u32 low_sclk_interrupt_t = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = ci_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);

	}

	return ret;
}

static void ci_get_leakage_voltages(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
				continue;
			if (vddc != 0 && vddc != virtual_voltage_id) {
				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
				pi->vddc_leakage.count++;
			}
		}
	} else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
										 virtual_voltage_id,
										 leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}

static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	bool want_thermal_protection;
	enum radeon_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(CG_THERMAL_CTRL);
		tmp &= ~DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~THERMAL_PROTECTION_DIS;
		else
			tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}

static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
					   enum radeon_dpm_auto_throttle_src source,
					   bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (enable) {
		if (!(pi->active_auto_throttle_sources & (1 << source))) {
			pi->active_auto_throttle_sources |= 1 << source;
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	} else {
		if (pi->active_auto_throttle_sources & (1 << source)) {
			pi->active_auto_throttle_sources &= ~(1 << source);
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	}
}

static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
{
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
}

static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->need_update_smu7_dpm_table = 0;
	return 0;
}

static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);

			WREG32_SMC(LCAC_MC0_CNTL, 0x05);
			WREG32_SMC(LCAC_MC1_CNTL, 0x05);
			WREG32_SMC(LCAC_CPL_CNTL, 0x100005);

			udelay(10);

			WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

static int ci_start_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(rdev, true);
	if (ret)
		return ret;

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_stop_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp &= ~GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	ret = ci_enable_sclk_mclk_dpm(rdev, false);
	if (ret)
		return ret;

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}

static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);

	if (enable)
		tmp &= ~SCLK_PWRMGT_OFF;
	else
		tmp |= SCLK_PWRMGT_OFF;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
}

#if 0
static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

	ci_set_power_limit(rdev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
		else
			ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
#endif

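/*
 * Message-with-argument protocol: the parameter is written to
 * SMC_MSG_ARG_0 before the message is sent, and a reply value is read
 * back from the same register after the SMC acknowledges the message.
 */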
1615 static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
1616 						      PPSMC_Msg msg, u32 parameter)
1617 {
1618 	WREG32(SMC_MSG_ARG_0, parameter);
1619 	return ci_send_msg_to_smc(rdev, msg);
1620 }
1621 
1622 static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
1623 							PPSMC_Msg msg, u32 *parameter)
1624 {
1625 	PPSMC_Result smc_result;
1626 
1627 	smc_result = ci_send_msg_to_smc(rdev, msg);
1628 
1629 	if ((smc_result == PPSMC_Result_OK) && parameter)
1630 		*parameter = RREG32(SMC_MSG_ARG_0);
1631 
1632 	return smc_result;
1633 }
1634 
1635 static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
1636 {
1637 	struct ci_power_info *pi = ci_get_pi(rdev);
1638 
1639 	if (!pi->sclk_dpm_key_disabled) {
1640 		PPSMC_Result smc_result =
1641 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1642 		if (smc_result != PPSMC_Result_OK)
1643 			return -EINVAL;
1644 	}
1645 
1646 	return 0;
1647 }
1648 
1649 static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
1650 {
1651 	struct ci_power_info *pi = ci_get_pi(rdev);
1652 
1653 	if (!pi->mclk_dpm_key_disabled) {
1654 		PPSMC_Result smc_result =
1655 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1656 		if (smc_result != PPSMC_Result_OK)
1657 			return -EINVAL;
1658 	}
1659 
1660 	return 0;
1661 }
1662 
1663 static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
1664 {
1665 	struct ci_power_info *pi = ci_get_pi(rdev);
1666 
1667 	if (!pi->pcie_dpm_key_disabled) {
1668 		PPSMC_Result smc_result =
1669 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1670 		if (smc_result != PPSMC_Result_OK)
1671 			return -EINVAL;
1672 	}
1673 
1674 	return 0;
1675 }
1676 
1677 static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
1678 {
1679 	struct ci_power_info *pi = ci_get_pi(rdev);
1680 
1681 	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1682 		PPSMC_Result smc_result =
1683 			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
1684 		if (smc_result != PPSMC_Result_OK)
1685 			return -EINVAL;
1686 	}
1687 
1688 	return 0;
1689 }
1690 
1691 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
1692 				       u32 target_tdp)
1693 {
1694 	PPSMC_Result smc_result =
1695 		ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1696 	if (smc_result != PPSMC_Result_OK)
1697 		return -EINVAL;
1698 	return 0;
1699 }
1700 
1701 static int ci_set_boot_state(struct radeon_device *rdev)
1702 {
1703 	return ci_enable_sclk_mclk_dpm(rdev, false);
1704 }
1705 
1706 static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
1707 {
1708 	u32 sclk_freq;
1709 	PPSMC_Result smc_result =
1710 		ci_send_msg_to_smc_return_parameter(rdev,
1711 						    PPSMC_MSG_API_GetSclkFrequency,
1712 						    &sclk_freq);
1713 	if (smc_result != PPSMC_Result_OK)
1714 		sclk_freq = 0;
1715 
1716 	return sclk_freq;
1717 }
1718 
1719 static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
1720 {
1721 	u32 mclk_freq;
1722 	PPSMC_Result smc_result =
1723 		ci_send_msg_to_smc_return_parameter(rdev,
1724 						    PPSMC_MSG_API_GetMclkFrequency,
1725 						    &mclk_freq);
1726 	if (smc_result != PPSMC_Result_OK)
1727 		mclk_freq = 0;
1728 
1729 	return mclk_freq;
1730 }
1731 
1732 static void ci_dpm_start_smc(struct radeon_device *rdev)
1733 {
1734 	int i;
1735 
1736 	ci_program_jump_on_start(rdev);
1737 	ci_start_smc_clock(rdev);
1738 	ci_start_smc(rdev);
1739 	for (i = 0; i < rdev->usec_timeout; i++) {
1740 		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
1741 			break;
1742 	}
1743 }
1744 
1745 static void ci_dpm_stop_smc(struct radeon_device *rdev)
1746 {
1747 	ci_reset_smc(rdev);
1748 	ci_stop_smc_clock(rdev);
1749 }
1750 
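/*
 * Read the firmware header out of SMC SRAM and cache the offsets of the
 * tables the driver patches later: DPM table, soft registers, MC register
 * table, fan table and MC arbiter DRAM timing table.
 */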
1751 static int ci_process_firmware_header(struct radeon_device *rdev)
1752 {
1753 	struct ci_power_info *pi = ci_get_pi(rdev);
1754 	u32 tmp;
1755 	int ret;
1756 
1757 	ret = ci_read_smc_sram_dword(rdev,
1758 				     SMU7_FIRMWARE_HEADER_LOCATION +
1759 				     offsetof(SMU7_Firmware_Header, DpmTable),
1760 				     &tmp, pi->sram_end);
1761 	if (ret)
1762 		return ret;
1763 
1764 	pi->dpm_table_start = tmp;
1765 
1766 	ret = ci_read_smc_sram_dword(rdev,
1767 				     SMU7_FIRMWARE_HEADER_LOCATION +
1768 				     offsetof(SMU7_Firmware_Header, SoftRegisters),
1769 				     &tmp, pi->sram_end);
1770 	if (ret)
1771 		return ret;
1772 
1773 	pi->soft_regs_start = tmp;
1774 
1775 	ret = ci_read_smc_sram_dword(rdev,
1776 				     SMU7_FIRMWARE_HEADER_LOCATION +
1777 				     offsetof(SMU7_Firmware_Header, mcRegisterTable),
1778 				     &tmp, pi->sram_end);
1779 	if (ret)
1780 		return ret;
1781 
1782 	pi->mc_reg_table_start = tmp;
1783 
1784 	ret = ci_read_smc_sram_dword(rdev,
1785 				     SMU7_FIRMWARE_HEADER_LOCATION +
1786 				     offsetof(SMU7_Firmware_Header, FanTable),
1787 				     &tmp, pi->sram_end);
1788 	if (ret)
1789 		return ret;
1790 
1791 	pi->fan_table_start = tmp;
1792 
1793 	ret = ci_read_smc_sram_dword(rdev,
1794 				     SMU7_FIRMWARE_HEADER_LOCATION +
1795 				     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1796 				     &tmp, pi->sram_end);
1797 	if (ret)
1798 		return ret;
1799 
1800 	pi->arb_table_start = tmp;
1801 
1802 	return 0;
1803 }
1804 
1805 static void ci_read_clock_registers(struct radeon_device *rdev)
1806 {
1807 	struct ci_power_info *pi = ci_get_pi(rdev);
1808 
1809 	pi->clock_registers.cg_spll_func_cntl =
1810 		RREG32_SMC(CG_SPLL_FUNC_CNTL);
1811 	pi->clock_registers.cg_spll_func_cntl_2 =
1812 		RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
1813 	pi->clock_registers.cg_spll_func_cntl_3 =
1814 		RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
1815 	pi->clock_registers.cg_spll_func_cntl_4 =
1816 		RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
1817 	pi->clock_registers.cg_spll_spread_spectrum =
1818 		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
1819 	pi->clock_registers.cg_spll_spread_spectrum_2 =
1820 		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
1821 	pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
1822 	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
1823 	pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
1824 	pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
1825 	pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
1826 	pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
1827 	pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
1828 	pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
1829 	pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
1830 }
1831 
1832 static void ci_init_sclk_t(struct radeon_device *rdev)
1833 {
1834 	struct ci_power_info *pi = ci_get_pi(rdev);
1835 
1836 	pi->low_sclk_interrupt_t = 0;
1837 }
1838 
1839 static void ci_enable_thermal_protection(struct radeon_device *rdev,
1840 					 bool enable)
1841 {
1842 	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1843 
1844 	if (enable)
1845 		tmp &= ~THERMAL_PROTECTION_DIS;
1846 	else
1847 		tmp |= THERMAL_PROTECTION_DIS;
1848 	WREG32_SMC(GENERAL_PWRMGT, tmp);
1849 }
1850 
1851 static void ci_enable_acpi_power_management(struct radeon_device *rdev)
1852 {
1853 	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1854 
1855 	tmp |= STATIC_PM_EN;
1856 
1857 	WREG32_SMC(GENERAL_PWRMGT, tmp);
1858 }
1859 
1860 #if 0
1861 static int ci_enter_ulp_state(struct radeon_device *rdev)
1862 {
1864 	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
1865 
1866 	mdelay(25); /* 25 ms is too long for udelay() */
1867 
1868 	return 0;
1869 }
1870 
1871 static int ci_exit_ulp_state(struct radeon_device *rdev)
1872 {
1873 	int i;
1874 
1875 	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
1876 
1877 	mdelay(7); /* 7 ms is too long for udelay() */
1878 
1879 	for (i = 0; i < rdev->usec_timeout; i++) {
1880 		if (RREG32(SMC_RESP_0) == 1)
1881 			break;
1882 		udelay(1000);
1883 	}
1884 
1885 	return 0;
1886 }
1887 #endif
1888 
1889 static int ci_notify_smc_display_change(struct radeon_device *rdev,
1890 					bool has_display)
1891 {
1892 	PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
1893 
1894 	return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ?  0 : -EINVAL;
1895 }
1896 
1897 static int ci_enable_ds_master_switch(struct radeon_device *rdev,
1898 				      bool enable)
1899 {
1900 	struct ci_power_info *pi = ci_get_pi(rdev);
1901 
1902 	if (enable) {
1903 		if (pi->caps_sclk_ds) {
1904 			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
1905 				return -EINVAL;
1906 		} else {
1907 			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1908 				return -EINVAL;
1909 		}
1910 	} else {
1911 		if (pi->caps_sclk_ds) {
1912 			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1913 				return -EINVAL;
1914 		}
1915 	}
1916 
1917 	return 0;
1918 }
1919 
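/*
 * Program the display gap and tell the SMC how much of each frame is
 * usable: frame time is derived from the refresh rate, the pre-VBI window
 * is the frame time minus a 200 us margin and the vblank time, and the
 * result is converted to reference-clock ticks (reference_freq is in
 * 10 kHz units, so ref_clock / 100 is ticks per microsecond).
 */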
1920 static void ci_program_display_gap(struct radeon_device *rdev)
1921 {
1922 	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
1923 	u32 pre_vbi_time_in_us;
1924 	u32 frame_time_in_us;
1925 	u32 ref_clock = rdev->clock.spll.reference_freq;
1926 	u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
1927 	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
1928 
1929 	tmp &= ~DISP_GAP_MASK;
1930 	if (rdev->pm.dpm.new_active_crtc_count > 0)
1931 		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
1932 	else
1933 		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
1934 	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
1935 
1936 	if (refresh_rate == 0)
1937 		refresh_rate = 60;
1938 	if (vblank_time == 0xffffffff)
1939 		vblank_time = 500;
1940 	frame_time_in_us = 1000000 / refresh_rate;
1941 	pre_vbi_time_in_us =
1942 		frame_time_in_us - 200 - vblank_time;
1943 	tmp = pre_vbi_time_in_us * (ref_clock / 100);
1944 
1945 	WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
1946 	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
1947 	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
1948 
1950 	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
1952 }
1953 
1954 static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
1955 {
1956 	struct ci_power_info *pi = ci_get_pi(rdev);
1957 	u32 tmp;
1958 
1959 	if (enable) {
1960 		if (pi->caps_sclk_ss_support) {
1961 			tmp = RREG32_SMC(GENERAL_PWRMGT);
1962 			tmp |= DYN_SPREAD_SPECTRUM_EN;
1963 			WREG32_SMC(GENERAL_PWRMGT, tmp);
1964 		}
1965 	} else {
1966 		tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
1967 		tmp &= ~SSEN;
1968 		WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);
1969 
1970 		tmp = RREG32_SMC(GENERAL_PWRMGT);
1971 		tmp &= ~DYN_SPREAD_SPECTRUM_EN;
1972 		WREG32_SMC(GENERAL_PWRMGT, tmp);
1973 	}
1974 }
1975 
1976 static void ci_program_sstp(struct radeon_device *rdev)
1977 {
1978 	WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
1979 }
1980 
1981 static void ci_enable_display_gap(struct radeon_device *rdev)
1982 {
1983 	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
1984 
1985 	tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
1986 	tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
1987 		DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
1988 
1989 	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
1990 }
1991 
1992 static void ci_program_vc(struct radeon_device *rdev)
1993 {
1994 	u32 tmp;
1995 
1996 	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1997 	tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
1998 	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1999 
2000 	WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
2001 	WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
2002 	WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
2003 	WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
2004 	WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
2005 	WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
2006 	WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
2007 	WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
2008 }
2009 
2010 static void ci_clear_vc(struct radeon_device *rdev)
2011 {
2012 	u32 tmp;
2013 
2014 	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
2015 	tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
2016 	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
2017 
2018 	WREG32_SMC(CG_FTV_0, 0);
2019 	WREG32_SMC(CG_FTV_1, 0);
2020 	WREG32_SMC(CG_FTV_2, 0);
2021 	WREG32_SMC(CG_FTV_3, 0);
2022 	WREG32_SMC(CG_FTV_4, 0);
2023 	WREG32_SMC(CG_FTV_5, 0);
2024 	WREG32_SMC(CG_FTV_6, 0);
2025 	WREG32_SMC(CG_FTV_7, 0);
2026 }
2027 
2028 static int ci_upload_firmware(struct radeon_device *rdev)
2029 {
2030 	struct ci_power_info *pi = ci_get_pi(rdev);
2031 	int i, ret;
2032 
2033 	for (i = 0; i < rdev->usec_timeout; i++) {
2034 		if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
2035 			break;
2036 	}
2037 	WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);
2038 
2039 	ci_stop_smc_clock(rdev);
2040 	ci_reset_smc(rdev);
2041 
2042 	ret = ci_load_smc_ucode(rdev, pi->sram_end);
2043 
2044 	return ret;
2045 
2047 
2048 static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
2049 				     struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
2050 				     struct atom_voltage_table *voltage_table)
2051 {
2052 	u32 i;
2053 
2054 	if (voltage_dependency_table == NULL)
2055 		return -EINVAL;
2056 
2057 	voltage_table->mask_low = 0;
2058 	voltage_table->phase_delay = 0;
2059 
2060 	voltage_table->count = voltage_dependency_table->count;
2061 	for (i = 0; i < voltage_table->count; i++) {
2062 		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2063 		voltage_table->entries[i].smio_low = 0;
2064 	}
2065 
2066 	return 0;
2067 }
2068 
2069 static int ci_construct_voltage_tables(struct radeon_device *rdev)
2070 {
2071 	struct ci_power_info *pi = ci_get_pi(rdev);
2072 	int ret;
2073 
2074 	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2075 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
2076 						    VOLTAGE_OBJ_GPIO_LUT,
2077 						    &pi->vddc_voltage_table);
2078 		if (ret)
2079 			return ret;
2080 	} else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2081 		ret = ci_get_svi2_voltage_table(rdev,
2082 						&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2083 						&pi->vddc_voltage_table);
2084 		if (ret)
2085 			return ret;
2086 	}
2087 
2088 	if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2089 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
2090 							 &pi->vddc_voltage_table);
2091 
2092 	if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2093 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
2094 						    VOLTAGE_OBJ_GPIO_LUT,
2095 						    &pi->vddci_voltage_table);
2096 		if (ret)
2097 			return ret;
2098 	} else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2099 		ret = ci_get_svi2_voltage_table(rdev,
2100 						&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2101 						&pi->vddci_voltage_table);
2102 		if (ret)
2103 			return ret;
2104 	}
2105 
2106 	if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2107 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
2108 							 &pi->vddci_voltage_table);
2109 
2110 	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2111 		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
2112 						    VOLTAGE_OBJ_GPIO_LUT,
2113 						    &pi->mvdd_voltage_table);
2114 		if (ret)
2115 			return ret;
2116 	} else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2117 		ret = ci_get_svi2_voltage_table(rdev,
2118 						&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2119 						&pi->mvdd_voltage_table);
2120 		if (ret)
2121 			return ret;
2122 	}
2123 
2124 	if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2125 		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
2126 							 &pi->mvdd_voltage_table);
2127 
2128 	return 0;
2129 }
2130 
2131 static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
2132 					  struct atom_voltage_table_entry *voltage_table,
2133 					  SMU7_Discrete_VoltageLevel *smc_voltage_table)
2134 {
2135 	int ret;
2136 
2137 	ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
2138 					    &smc_voltage_table->StdVoltageHiSidd,
2139 					    &smc_voltage_table->StdVoltageLoSidd);
2140 
2141 	if (ret) {
2142 		smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2143 		smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2144 	}
2145 
2146 	smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2147 	smc_voltage_table->StdVoltageHiSidd =
2148 		cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2149 	smc_voltage_table->StdVoltageLoSidd =
2150 		cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2151 }
2152 
2153 static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
2154 				      SMU7_Discrete_DpmTable *table)
2155 {
2156 	struct ci_power_info *pi = ci_get_pi(rdev);
2157 	unsigned int count;
2158 
2159 	table->VddcLevelCount = pi->vddc_voltage_table.count;
2160 	for (count = 0; count < table->VddcLevelCount; count++) {
2161 		ci_populate_smc_voltage_table(rdev,
2162 					      &pi->vddc_voltage_table.entries[count],
2163 					      &table->VddcLevel[count]);
2164 
2165 		if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2166 			table->VddcLevel[count].Smio |=
2167 				pi->vddc_voltage_table.entries[count].smio_low;
2168 		else
2169 			table->VddcLevel[count].Smio = 0;
2170 	}
2171 	table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2172 
2173 	return 0;
2174 }
2175 
2176 static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
2177 				       SMU7_Discrete_DpmTable *table)
2178 {
2179 	unsigned int count;
2180 	struct ci_power_info *pi = ci_get_pi(rdev);
2181 
2182 	table->VddciLevelCount = pi->vddci_voltage_table.count;
2183 	for (count = 0; count < table->VddciLevelCount; count++) {
2184 		ci_populate_smc_voltage_table(rdev,
2185 					      &pi->vddci_voltage_table.entries[count],
2186 					      &table->VddciLevel[count]);
2187 
2188 		if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2189 			table->VddciLevel[count].Smio |=
2190 				pi->vddci_voltage_table.entries[count].smio_low;
2191 		else
2192 			table->VddciLevel[count].Smio = 0;
2193 	}
2194 	table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2195 
2196 	return 0;
2197 }
2198 
2199 static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
2200 				      SMU7_Discrete_DpmTable *table)
2201 {
2202 	struct ci_power_info *pi = ci_get_pi(rdev);
2203 	unsigned int count;
2204 
2205 	table->MvddLevelCount = pi->mvdd_voltage_table.count;
2206 	for (count = 0; count < table->MvddLevelCount; count++) {
2207 		ci_populate_smc_voltage_table(rdev,
2208 					      &pi->mvdd_voltage_table.entries[count],
2209 					      &table->MvddLevel[count]);
2210 
2211 		if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2212 			table->MvddLevel[count].Smio |=
2213 				pi->mvdd_voltage_table.entries[count].smio_low;
2214 		else
2215 			table->MvddLevel[count].Smio = 0;
2216 	}
2217 	table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2218 
2219 	return 0;
2220 }
2221 
2222 static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
2223 					  SMU7_Discrete_DpmTable *table)
2224 {
2225 	int ret;
2226 
2227 	ret = ci_populate_smc_vddc_table(rdev, table);
2228 	if (ret)
2229 		return ret;
2230 
2231 	ret = ci_populate_smc_vddci_table(rdev, table);
2232 	if (ret)
2233 		return ret;
2234 
2235 	ret = ci_populate_smc_mvdd_table(rdev, table);
2236 	if (ret)
2237 		return ret;
2238 
2239 	return 0;
2240 }
2241 
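/*
 * Look up the MVDD level for a memory clock; fails when MVDD control is
 * disabled or the clock is above the whole dependency table.
 */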
2242 static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
2243 				  SMU7_Discrete_VoltageLevel *voltage)
2244 {
2245 	struct ci_power_info *pi = ci_get_pi(rdev);
2246 	u32 i = 0;
2247 
2248 	if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2249 		for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2250 			if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2251 				voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2252 				break;
2253 			}
2254 		}
2255 
2256 		if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2257 			return -EINVAL;

		/* a matching level was found above */
		return 0;
2258 	}
2259 
2260 	return -EINVAL;
2261 }
2262 
2263 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
2264 					 struct atom_voltage_table_entry *voltage_table,
2265 					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2266 {
2267 	u16 v_index, idx;
2268 	bool voltage_found = false;
2269 	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2270 	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2271 
2272 	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2273 		return -EINVAL;
2274 
2275 	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2276 		for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2277 			if (voltage_table->value ==
2278 			    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2279 				voltage_found = true;
2280 				if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
2281 					idx = v_index;
2282 				else
2283 					idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2284 				*std_voltage_lo_sidd =
2285 					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2286 				*std_voltage_hi_sidd =
2287 					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2288 				break;
2289 			}
2290 		}
2291 
2292 		if (!voltage_found) {
2293 			for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2294 				if (voltage_table->value <=
2295 				    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2296 					voltage_found = true;
2297 					if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
2298 						idx = v_index;
2299 					else
2300 						idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2301 					*std_voltage_lo_sidd =
2302 						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2303 					*std_voltage_hi_sidd =
2304 						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2305 					break;
2306 				}
2307 			}
2308 		}
2309 	}
2310 
2311 	return 0;
2312 }
2313 
2314 static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
2315 						  const struct radeon_phase_shedding_limits_table *limits,
2316 						  u32 sclk,
2317 						  u32 *phase_shedding)
2318 {
2319 	unsigned int i;
2320 
2321 	*phase_shedding = 1;
2322 
2323 	for (i = 0; i < limits->count; i++) {
2324 		if (sclk < limits->entries[i].sclk) {
2325 			*phase_shedding = i;
2326 			break;
2327 		}
2328 	}
2329 }
2330 
2331 static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
2332 						  const struct radeon_phase_shedding_limits_table *limits,
2333 						  u32 mclk,
2334 						  u32 *phase_shedding)
2335 {
2336 	unsigned int i;
2337 
2338 	*phase_shedding = 1;
2339 
2340 	for (i = 0; i < limits->count; i++) {
2341 		if (mclk < limits->entries[i].mclk) {
2342 			*phase_shedding = i;
2343 			break;
2344 		}
2345 	}
2346 }
2347 
2348 static int ci_init_arb_table_index(struct radeon_device *rdev)
2349 {
2350 	struct ci_power_info *pi = ci_get_pi(rdev);
2351 	u32 tmp;
2352 	int ret;
2353 
2354 	ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
2355 				     &tmp, pi->sram_end);
2356 	if (ret)
2357 		return ret;
2358 
2359 	tmp &= 0x00FFFFFF;
2360 	tmp |= MC_CG_ARB_FREQ_F1 << 24;
2361 
2362 	return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
2363 				       tmp, pi->sram_end);
2364 }
2365 
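/*
 * Look up the voltage required for a clock: the first table entry with
 * clk >= clock wins; if the clock is above the whole table, the voltage
 * of the highest entry is used.
 */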
2366 static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
2367 					 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
2368 					 u32 clock, u32 *voltage)
2369 {
2370 	u32 i = 0;
2371 
2372 	if (allowed_clock_voltage_table->count == 0)
2373 		return -EINVAL;
2374 
2375 	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2376 		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2377 			*voltage = allowed_clock_voltage_table->entries[i].v;
2378 			return 0;
2379 		}
2380 	}
2381 
2382 	*voltage = allowed_clock_voltage_table->entries[i-1].v;
2383 
2384 	return 0;
2385 }
2386 
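/*
 * Pick the largest deep-sleep divider ID such that the divided sclk
 * (sclk >> id) still meets the minimum engine clock.
 */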
2387 static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
2388 					     u32 sclk, u32 min_sclk_in_sr)
2389 {
2390 	u32 i;
2391 	u32 tmp;
2392 	u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
2393 		min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
2394 
2395 	if (sclk < min)
2396 		return 0;
2397 
2398 	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
2399 		tmp = sclk / (1 << i);
2400 		if (tmp >= min || i == 0)
2401 			break;
2402 	}
2403 
2404 	return (u8)i;
2405 }
2406 
2407 static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
2408 {
2409 	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2410 }
2411 
2412 static int ci_reset_to_default(struct radeon_device *rdev)
2413 {
2414 	return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2415 		0 : -EINVAL;
2416 }
2417 
2418 static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
2419 {
2420 	u32 tmp;
2421 
2422 	tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
2423 
2424 	if (tmp == MC_CG_ARB_FREQ_F0)
2425 		return 0;
2426 
2427 	return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
2428 }
2429 
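/*
 * Workaround for Hawaii (0x67B0/0x67B1) boards whose MC_SEQ_MISC0 flags
 * the affected memory configuration: for memory clocks in the 1.0-1.25
 * and 1.25-1.375 GHz bands (clocks are in 10 kHz units), recompute the
 * DRAM timing 2 field from the engine clock.
 */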
2430 static void ci_register_patching_mc_arb(struct radeon_device *rdev,
2431 					const u32 engine_clock,
2432 					const u32 memory_clock,
2433 					u32 *dram_timing2)
2434 {
2435 	bool patch;
2436 	u32 tmp, tmp2;
2437 
2438 	tmp = RREG32(MC_SEQ_MISC0);
2439 	patch = (tmp & 0x0000f00) == 0x300;
2440 
2441 	if (patch &&
2442 	    ((rdev->pdev->device == 0x67B0) ||
2443 	     (rdev->pdev->device == 0x67B1))) {
2444 		if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2445 			tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2446 			*dram_timing2 &= ~0x00ff0000;
2447 			*dram_timing2 |= tmp2 << 16;
2448 		} else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2449 			tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2450 			*dram_timing2 &= ~0x00ff0000;
2451 			*dram_timing2 |= tmp2 << 16;
2452 		}
2453 	}
2454 }
2455 
2457 static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
2458 						u32 sclk,
2459 						u32 mclk,
2460 						SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2461 {
2462 	u32 dram_timing;
2463 	u32 dram_timing2;
2464 	u32 burst_time;
2465 
2466 	radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
2467 
2468 	dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
2469 	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
2470 	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
2471 
2472 	ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2);
2473 
2474 	arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2475 	arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2476 	arb_regs->McArbBurstTime = (u8)burst_time;
2477 
2478 	return 0;
2479 }
2480 
2481 static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
2482 {
2483 	struct ci_power_info *pi = ci_get_pi(rdev);
2484 	SMU7_Discrete_MCArbDramTimingTable arb_regs;
2485 	u32 i, j;
2486 	int ret = 0;
2487 
2488 	memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2489 
2490 	for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2491 		for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2492 			ret = ci_populate_memory_timing_parameters(rdev,
2493 								   pi->dpm_table.sclk_table.dpm_levels[i].value,
2494 								   pi->dpm_table.mclk_table.dpm_levels[j].value,
2495 								   &arb_regs.entries[i][j]);
2496 			if (ret)
2497 				break;
2498 		}
2499 	}
2500 
2501 	if (ret == 0)
2502 		ret = ci_copy_bytes_to_smc(rdev,
2503 					   pi->arb_table_start,
2504 					   (u8 *)&arb_regs,
2505 					   sizeof(SMU7_Discrete_MCArbDramTimingTable),
2506 					   pi->sram_end);
2507 
2508 	return ret;
2509 }
2510 
2511 static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2512 {
2513 	struct ci_power_info *pi = ci_get_pi(rdev);
2514 
2515 	if (pi->need_update_smu7_dpm_table == 0)
2516 		return 0;
2517 
2518 	return ci_do_program_memory_timing_parameters(rdev);
2519 }
2520 
2521 static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2522 					  struct radeon_ps *radeon_boot_state)
2523 {
2524 	struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2525 	struct ci_power_info *pi = ci_get_pi(rdev);
2526 	u32 level = 0;
2527 
2528 	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2529 		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2530 		    boot_state->performance_levels[0].sclk) {
2531 			pi->smc_state_table.GraphicsBootLevel = level;
2532 			break;
2533 		}
2534 	}
2535 
2536 	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2537 		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2538 		    boot_state->performance_levels[0].mclk) {
2539 			pi->smc_state_table.MemoryBootLevel = level;
2540 			break;
2541 		}
2542 	}
2543 }
2544 
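/* Build a bitmask of the enabled DPM levels, level 0 in bit 0. */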
2545 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2546 {
2547 	u32 i;
2548 	u32 mask_value = 0;
2549 
2550 	for (i = dpm_table->count; i > 0; i--) {
2551 		mask_value <<= 1;
2552 		if (dpm_table->dpm_levels[i-1].enabled)
2553 			mask_value |= 0x1;
2556 	}
2557 
2558 	return mask_value;
2559 }
2560 
2561 static void ci_populate_smc_link_level(struct radeon_device *rdev,
2562 				       SMU7_Discrete_DpmTable *table)
2563 {
2564 	struct ci_power_info *pi = ci_get_pi(rdev);
2565 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
2566 	u32 i;
2567 
2568 	for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2569 		table->LinkLevel[i].PcieGenSpeed =
2570 			(u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2571 		table->LinkLevel[i].PcieLaneCount =
2572 			r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2573 		table->LinkLevel[i].EnabledForActivity = 1;
2574 		table->LinkLevel[i].DownT = cpu_to_be32(5);
2575 		table->LinkLevel[i].UpT = cpu_to_be32(30);
2576 	}
2577 
2578 	pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2579 	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2580 		ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2581 }
2582 
2583 static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
2584 				     SMU7_Discrete_DpmTable *table)
2585 {
2586 	u32 count;
2587 	struct atom_clock_dividers dividers;
2588 	int ret = -EINVAL;
2589 
2590 	table->UvdLevelCount =
2591 		rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2592 
2593 	for (count = 0; count < table->UvdLevelCount; count++) {
2594 		table->UvdLevel[count].VclkFrequency =
2595 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2596 		table->UvdLevel[count].DclkFrequency =
2597 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2598 		table->UvdLevel[count].MinVddc =
2599 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2600 		table->UvdLevel[count].MinVddcPhases = 1;
2601 
2602 		ret = radeon_atom_get_clock_dividers(rdev,
2603 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2604 						     table->UvdLevel[count].VclkFrequency, false, &dividers);
2605 		if (ret)
2606 			return ret;
2607 
2608 		table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2609 
2610 		ret = radeon_atom_get_clock_dividers(rdev,
2611 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2612 						     table->UvdLevel[count].DclkFrequency, false, &dividers);
2613 		if (ret)
2614 			return ret;
2615 
2616 		table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2617 
2618 		table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2619 		table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2620 		table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2621 	}
2622 
2623 	return ret;
2624 }
2625 
2626 static int ci_populate_smc_vce_level(struct radeon_device *rdev,
2627 				     SMU7_Discrete_DpmTable *table)
2628 {
2629 	u32 count;
2630 	struct atom_clock_dividers dividers;
2631 	int ret = -EINVAL;
2632 
2633 	table->VceLevelCount =
2634 		rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2635 
2636 	for (count = 0; count < table->VceLevelCount; count++) {
2637 		table->VceLevel[count].Frequency =
2638 			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2639 		table->VceLevel[count].MinVoltage =
2640 			(u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2641 		table->VceLevel[count].MinPhases = 1;
2642 
2643 		ret = radeon_atom_get_clock_dividers(rdev,
2644 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2645 						     table->VceLevel[count].Frequency, false, &dividers);
2646 		if (ret)
2647 			return ret;
2648 
2649 		table->VceLevel[count].Divider = (u8)dividers.post_divider;
2650 
2651 		table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2652 		table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2653 	}
2654 
2655 	return ret;
2657 }
2658 
2659 static int ci_populate_smc_acp_level(struct radeon_device *rdev,
2660 				     SMU7_Discrete_DpmTable *table)
2661 {
2662 	u32 count;
2663 	struct atom_clock_dividers dividers;
2664 	int ret = -EINVAL;
2665 
2666 	table->AcpLevelCount = (u8)
2667 		(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2668 
2669 	for (count = 0; count < table->AcpLevelCount; count++) {
2670 		table->AcpLevel[count].Frequency =
2671 			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2672 		table->AcpLevel[count].MinVoltage =
2673 			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2674 		table->AcpLevel[count].MinPhases = 1;
2675 
2676 		ret = radeon_atom_get_clock_dividers(rdev,
2677 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2678 						     table->AcpLevel[count].Frequency, false, &dividers);
2679 		if (ret)
2680 			return ret;
2681 
2682 		table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2683 
2684 		table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2685 		table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2686 	}
2687 
2688 	return ret;
2689 }
2690 
2691 static int ci_populate_smc_samu_level(struct radeon_device *rdev,
2692 				      SMU7_Discrete_DpmTable *table)
2693 {
2694 	u32 count;
2695 	struct atom_clock_dividers dividers;
2696 	int ret = -EINVAL;
2697 
2698 	table->SamuLevelCount =
2699 		rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2700 
2701 	for (count = 0; count < table->SamuLevelCount; count++) {
2702 		table->SamuLevel[count].Frequency =
2703 			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2704 		table->SamuLevel[count].MinVoltage =
2705 			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2706 		table->SamuLevel[count].MinPhases = 1;
2707 
2708 		ret = radeon_atom_get_clock_dividers(rdev,
2709 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2710 						     table->SamuLevel[count].Frequency, false, &dividers);
2711 		if (ret)
2712 			return ret;
2713 
2714 		table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2715 
2716 		table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2717 		table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2718 	}
2719 
2720 	return ret;
2721 }
2722 
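/*
 * Translate a memory clock into the MPLL register set: feedback and post
 * dividers come from the ATOM tables, with optional memory spread
 * spectrum (CLKS/CLKV), the DLL speed, and the requested DLL power-down
 * state folded in.
 */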
2723 static int ci_calculate_mclk_params(struct radeon_device *rdev,
2724 				    u32 memory_clock,
2725 				    SMU7_Discrete_MemoryLevel *mclk,
2726 				    bool strobe_mode,
2727 				    bool dll_state_on)
2728 {
2729 	struct ci_power_info *pi = ci_get_pi(rdev);
2730 	u32  dll_cntl = pi->clock_registers.dll_cntl;
2731 	u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2732 	u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2733 	u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2734 	u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2735 	u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2736 	u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2737 	u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
2738 	u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
2739 	struct atom_mpll_param mpll_param;
2740 	int ret;
2741 
2742 	ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
2743 	if (ret)
2744 		return ret;
2745 
2746 	mpll_func_cntl &= ~BWCTRL_MASK;
2747 	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
2748 
2749 	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
2750 	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
2751 		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
2752 
2753 	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
2754 	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
2755 
2756 	if (pi->mem_gddr5) {
2757 		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
2758 		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
2759 			YCLK_POST_DIV(mpll_param.post_div);
2760 	}
2761 
2762 	if (pi->caps_mclk_ss_support) {
2763 		struct radeon_atom_ss ss;
2764 		u32 freq_nom;
2765 		u32 tmp;
2766 		u32 reference_clock = rdev->clock.mpll.reference_freq;
2767 
2768 		if (mpll_param.qdr == 1)
2769 			freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
2770 		else
2771 			freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
2772 
2773 		tmp = (freq_nom / reference_clock);
2774 		tmp = tmp * tmp;
2775 		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2776 						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2777 			u32 clks = reference_clock * 5 / ss.rate;
2778 			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2779 
2780 			mpll_ss1 &= ~CLKV_MASK;
2781 			mpll_ss1 |= CLKV(clkv);
2782 
2783 			mpll_ss2 &= ~CLKS_MASK;
2784 			mpll_ss2 |= CLKS(clks);
2785 		}
2786 	}
2787 
2788 	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
2789 	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
2790 
2791 	if (dll_state_on)
2792 		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
2793 	else
2794 		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2795 
2796 	mclk->MclkFrequency = memory_clock;
2797 	mclk->MpllFuncCntl = mpll_func_cntl;
2798 	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2799 	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2800 	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2801 	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2802 	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2803 	mclk->DllCntl = dll_cntl;
2804 	mclk->MpllSs1 = mpll_ss1;
2805 	mclk->MpllSs2 = mpll_ss2;
2806 
2807 	return 0;
2808 }
2809 
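/*
 * Fill one SMC memory level: look up the VDDC/VDDCI/MVDD floors for this
 * memory clock, decide stutter/strobe/EDC and DLL state from the
 * configured thresholds, compute the MPLL settings, then byte-swap the
 * multi-byte fields for the big-endian SMC.
 */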
2810 static int ci_populate_single_memory_level(struct radeon_device *rdev,
2811 					   u32 memory_clock,
2812 					   SMU7_Discrete_MemoryLevel *memory_level)
2813 {
2814 	struct ci_power_info *pi = ci_get_pi(rdev);
2815 	int ret;
2816 	bool dll_state_on;
2817 
2818 	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
2819 		ret = ci_get_dependency_volt_by_clk(rdev,
2820 						    &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2821 						    memory_clock, &memory_level->MinVddc);
2822 		if (ret)
2823 			return ret;
2824 	}
2825 
2826 	if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
2827 		ret = ci_get_dependency_volt_by_clk(rdev,
2828 						    &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2829 						    memory_clock, &memory_level->MinVddci);
2830 		if (ret)
2831 			return ret;
2832 	}
2833 
2834 	if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
2835 		ret = ci_get_dependency_volt_by_clk(rdev,
2836 						    &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2837 						    memory_clock, &memory_level->MinMvdd);
2838 		if (ret)
2839 			return ret;
2840 	}
2841 
2842 	memory_level->MinVddcPhases = 1;
2843 
2844 	if (pi->vddc_phase_shed_control)
2845 		ci_populate_phase_value_based_on_mclk(rdev,
2846 						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2847 						      memory_clock,
2848 						      &memory_level->MinVddcPhases);
2849 
2850 	memory_level->EnabledForThrottle = 1;
2851 	memory_level->UpH = 0;
2852 	memory_level->DownH = 100;
2853 	memory_level->VoltageDownH = 0;
2854 	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
2855 
2856 	memory_level->StutterEnable = false;
2857 	memory_level->StrobeEnable = false;
2858 	memory_level->EdcReadEnable = false;
2859 	memory_level->EdcWriteEnable = false;
2860 	memory_level->RttEnable = false;
2861 
2862 	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2863 
2864 	if (pi->mclk_stutter_mode_threshold &&
2865 	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
2866 	    !pi->uvd_enabled &&
2867 	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
2868 	    (rdev->pm.dpm.new_active_crtc_count <= 2))
2869 		memory_level->StutterEnable = true;
2870 
2871 	if (pi->mclk_strobe_mode_threshold &&
2872 	    (memory_clock <= pi->mclk_strobe_mode_threshold))
2873 		memory_level->StrobeEnable = true;
2874 
2875 	if (pi->mem_gddr5) {
2876 		memory_level->StrobeRatio =
2877 			si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
2878 		if (pi->mclk_edc_enable_threshold &&
2879 		    (memory_clock > pi->mclk_edc_enable_threshold))
2880 			memory_level->EdcReadEnable = true;
2881 
2882 		if (pi->mclk_edc_wr_enable_threshold &&
2883 		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
2884 			memory_level->EdcWriteEnable = true;
2885 
2886 		if (memory_level->StrobeEnable) {
2887 			if (si_get_mclk_frequency_ratio(memory_clock, true) >=
2888 			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
2889 				dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
2890 			else
2891 				dll_state_on = (RREG32(MC_SEQ_MISC6) >> 1) & 0x1;
2892 		} else {
2893 			dll_state_on = pi->dll_default_on;
2894 		}
2895 	} else {
2896 		memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
2897 		dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
2898 	}
2899 
2900 	ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
2901 	if (ret)
2902 		return ret;
2903 
2904 	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
2905 	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
2906 	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
2907 	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
2908 
2909 	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
2910 	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
2911 	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
2912 	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
2913 	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
2914 	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
2915 	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
2916 	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
2917 	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
2918 	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
2919 	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
2920 
2921 	return 0;
2922 }
2923 
2924 static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
2925 				      SMU7_Discrete_DpmTable *table)
2926 {
2927 	struct ci_power_info *pi = ci_get_pi(rdev);
2928 	struct atom_clock_dividers dividers;
2929 	SMU7_Discrete_VoltageLevel voltage_level;
2930 	u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
2931 	u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
2932 	u32 dll_cntl = pi->clock_registers.dll_cntl;
2933 	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2934 	int ret;
2935 
2936 	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
2937 
2938 	if (pi->acpi_vddc)
2939 		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
2940 	else
2941 		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
2942 
2943 	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
2944 
2945 	table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
2946 
2947 	ret = radeon_atom_get_clock_dividers(rdev,
2948 					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2949 					     table->ACPILevel.SclkFrequency, false, &dividers);
2950 	if (ret)
2951 		return ret;
2952 
2953 	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
2954 	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2955 	table->ACPILevel.DeepSleepDivId = 0;
2956 
2957 	spll_func_cntl &= ~SPLL_PWRON;
2958 	spll_func_cntl |= SPLL_RESET;
2959 
2960 	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
2961 	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
2962 
2963 	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
2964 	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
2965 	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
2966 	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
2967 	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
2968 	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2969 	table->ACPILevel.CcPwrDynRm = 0;
2970 	table->ACPILevel.CcPwrDynRm1 = 0;
2971 
2972 	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
2973 	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
2974 	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
2975 	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
2976 	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
2977 	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
2978 	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
2979 	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
2980 	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
2981 	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
2982 	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
2983 
2984 	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
2985 	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
2986 
2987 	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2988 		if (pi->acpi_vddci)
2989 			table->MemoryACPILevel.MinVddci =
2990 				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
2991 		else
2992 			table->MemoryACPILevel.MinVddci =
2993 				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
2994 	}
2995 
2996 	if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
2997 		table->MemoryACPILevel.MinMvdd = 0;
2998 	else
2999 		table->MemoryACPILevel.MinMvdd =
3000 			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
3001 
3002 	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
3003 	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
3004 
3005 	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
3006 
3007 	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
3008 	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
3009 	table->MemoryACPILevel.MpllAdFuncCntl =
3010 		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
3011 	table->MemoryACPILevel.MpllDqFuncCntl =
3012 		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
3013 	table->MemoryACPILevel.MpllFuncCntl =
3014 		cpu_to_be32(pi->clock_registers.mpll_func_cntl);
3015 	table->MemoryACPILevel.MpllFuncCntl_1 =
3016 		cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
3017 	table->MemoryACPILevel.MpllFuncCntl_2 =
3018 		cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
3019 	table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
3020 	table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
3021 
3022 	table->MemoryACPILevel.EnabledForThrottle = 0;
3023 	table->MemoryACPILevel.EnabledForActivity = 0;
3024 	table->MemoryACPILevel.UpH = 0;
3025 	table->MemoryACPILevel.DownH = 100;
3026 	table->MemoryACPILevel.VoltageDownH = 0;
3027 	table->MemoryACPILevel.ActivityLevel =
3028 		cpu_to_be16((u16)pi->mclk_activity_target);
3029 
3030 	table->MemoryACPILevel.StutterEnable = false;
3031 	table->MemoryACPILevel.StrobeEnable = false;
3032 	table->MemoryACPILevel.EdcReadEnable = false;
3033 	table->MemoryACPILevel.EdcWriteEnable = false;
3034 	table->MemoryACPILevel.RttEnable = false;
3035 
3036 	return 0;
3037 }
3038 
3040 static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
3041 {
3042 	struct ci_power_info *pi = ci_get_pi(rdev);
3043 	struct ci_ulv_parm *ulv = &pi->ulv;
3044 
3045 	if (ulv->supported) {
3046 		if (enable)
3047 			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3048 				0 : -EINVAL;
3049 		else
3050 			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3051 				0 : -EINVAL;
3052 	}
3053 
3054 	return 0;
3055 }
3056 
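/*
 * ULV runs at a voltage below the lowest sclk DPM level: the offset is
 * stored directly for non-SVI2 control and converted to VID steps for
 * SVI2 (one VID step is 6.25 mV, hence the 100/625 scaling).
 */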
3057 static int ci_populate_ulv_level(struct radeon_device *rdev,
3058 				 SMU7_Discrete_Ulv *state)
3059 {
3060 	struct ci_power_info *pi = ci_get_pi(rdev);
3061 	u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
3062 
3063 	state->CcPwrDynRm = 0;
3064 	state->CcPwrDynRm1 = 0;
3065 
3066 	if (ulv_voltage == 0) {
3067 		pi->ulv.supported = false;
3068 		return 0;
3069 	}
3070 
3071 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3072 		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3073 			state->VddcOffset = 0;
3074 		else
3075 			state->VddcOffset =
3076 				rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3077 	} else {
3078 		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3079 			state->VddcOffsetVid = 0;
3080 		else
3081 			state->VddcOffsetVid = (u8)
3082 				((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3083 				 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3084 	}
3085 	state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3086 
3087 	state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3088 	state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3089 	state->VddcOffset = cpu_to_be16(state->VddcOffset);
3090 
3091 	return 0;
3092 }
3093 
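/*
 * Translate an engine clock into the SPLL register set: the fractional
 * feedback divider is programmed with dithering enabled, and engine
 * spread spectrum (CLK_S/CLK_V) is added when supported.
 */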
3094 static int ci_calculate_sclk_params(struct radeon_device *rdev,
3095 				    u32 engine_clock,
3096 				    SMU7_Discrete_GraphicsLevel *sclk)
3097 {
3098 	struct ci_power_info *pi = ci_get_pi(rdev);
3099 	struct atom_clock_dividers dividers;
3100 	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
3101 	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
3102 	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
3103 	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3104 	u32 reference_clock = rdev->clock.spll.reference_freq;
3105 	u32 reference_divider;
3106 	u32 fbdiv;
3107 	int ret;
3108 
3109 	ret = radeon_atom_get_clock_dividers(rdev,
3110 					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3111 					     engine_clock, false, &dividers);
3112 	if (ret)
3113 		return ret;
3114 
3115 	reference_divider = 1 + dividers.ref_div;
3116 	fbdiv = dividers.fb_div & 0x3FFFFFF;
3117 
3118 	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
3119 	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
3120 	spll_func_cntl_3 |= SPLL_DITHEN;
3121 
3122 	if (pi->caps_sclk_ss_support) {
3123 		struct radeon_atom_ss ss;
3124 		u32 vco_freq = engine_clock * dividers.post_div;
3125 
3126 		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
3127 						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
3128 			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
3129 			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
3130 
3131 			cg_spll_spread_spectrum &= ~CLK_S_MASK;
3132 			cg_spll_spread_spectrum |= CLK_S(clk_s);
3133 			cg_spll_spread_spectrum |= SSEN;
3134 
3135 			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
3136 			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
3137 		}
3138 	}
3139 
3140 	sclk->SclkFrequency = engine_clock;
3141 	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
3142 	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
3143 	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
3144 	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
3145 	sclk->SclkDid = (u8)dividers.post_divider;
3146 
3147 	return 0;
3148 }
3149 
3150 static int ci_populate_single_graphic_level(struct radeon_device *rdev,
3151 					    u32 engine_clock,
3152 					    u16 sclk_activity_level_t,
3153 					    SMU7_Discrete_GraphicsLevel *graphic_level)
3154 {
3155 	struct ci_power_info *pi = ci_get_pi(rdev);
3156 	int ret;
3157 
3158 	ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
3159 	if (ret)
3160 		return ret;
3161 
3162 	ret = ci_get_dependency_volt_by_clk(rdev,
3163 					    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3164 					    engine_clock, &graphic_level->MinVddc);
3165 	if (ret)
3166 		return ret;
3167 
3168 	graphic_level->SclkFrequency = engine_clock;
3169 
3170 	graphic_level->Flags =  0;
3171 	graphic_level->MinVddcPhases = 1;
3172 
3173 	if (pi->vddc_phase_shed_control)
3174 		ci_populate_phase_value_based_on_sclk(rdev,
3175 						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
3176 						      engine_clock,
3177 						      &graphic_level->MinVddcPhases);
3178 
3179 	graphic_level->ActivityLevel = sclk_activity_level_t;
3180 
3181 	graphic_level->CcPwrDynRm = 0;
3182 	graphic_level->CcPwrDynRm1 = 0;
3183 	graphic_level->EnabledForThrottle = 1;
3184 	graphic_level->UpH = 0;
3185 	graphic_level->DownH = 0;
3186 	graphic_level->VoltageDownH = 0;
3187 	graphic_level->PowerThrottle = 0;
3188 
3189 	if (pi->caps_sclk_ds)
3190 		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
3191 										   engine_clock,
3192 										   CISLAND_MINIMUM_ENGINE_CLOCK);
3193 
3194 	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3195 
3196 	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
3197 	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
3198 	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
3199 	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
3200 	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
3201 	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
3202 	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
3203 	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
3204 	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
3205 	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
3206 	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
3207 
3208 	return 0;
3209 }
3210 
3211 static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
3212 {
3213 	struct ci_power_info *pi = ci_get_pi(rdev);
3214 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
3215 	u32 level_array_address = pi->dpm_table_start +
3216 		offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3217 	u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3218 		SMU7_MAX_LEVELS_GRAPHICS;
3219 	SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3220 	u32 i;
	int ret;
3221 
3222 	memset(levels, 0, level_array_size);
3223 
3224 	for (i = 0; i < dpm_table->sclk_table.count; i++) {
3225 		ret = ci_populate_single_graphic_level(rdev,
3226 						       dpm_table->sclk_table.dpm_levels[i].value,
3227 						       (u16)pi->activity_target[i],
3228 						       &pi->smc_state_table.GraphicsLevel[i]);
3229 		if (ret)
3230 			return ret;
3231 		if (i > 1)
3232 			pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3233 		if (i == (dpm_table->sclk_table.count - 1))
3234 			pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3235 				PPSMC_DISPLAY_WATERMARK_HIGH;
3236 	}
3237 	pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
3238 
3239 	pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3240 	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3241 		ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3242 
3243 	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
3244 				   (u8 *)levels, level_array_size,
3245 				   pi->sram_end);
3246 	if (ret)
3247 		return ret;
3248 
3249 	return 0;
3250 }
3251 
3252 static int ci_populate_ulv_state(struct radeon_device *rdev,
3253 				 SMU7_Discrete_Ulv *ulv_level)
3254 {
3255 	return ci_populate_ulv_level(rdev, ulv_level);
3256 }
3257 
3258 static int ci_populate_all_memory_levels(struct radeon_device *rdev)
3259 {
3260 	struct ci_power_info *pi = ci_get_pi(rdev);
3261 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
3262 	u32 level_array_address = pi->dpm_table_start +
3263 		offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3264 	u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3265 		SMU7_MAX_LEVELS_MEMORY;
3266 	SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3267 	u32 i;
	int ret;
3268 
3269 	memset(levels, 0, level_array_size);
3270 
3271 	for (i = 0; i < dpm_table->mclk_table.count; i++) {
3272 		if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3273 			return -EINVAL;
3274 		ret = ci_populate_single_memory_level(rdev,
3275 						      dpm_table->mclk_table.dpm_levels[i].value,
3276 						      &pi->smc_state_table.MemoryLevel[i]);
3277 		if (ret)
3278 			return ret;
3279 	}
3280 
3281 	pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
3282 
3283 	if ((dpm_table->mclk_table.count >= 2) &&
3284 	    ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) {
3285 		pi->smc_state_table.MemoryLevel[1].MinVddc =
3286 			pi->smc_state_table.MemoryLevel[0].MinVddc;
3287 		pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3288 			pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3289 	}
3290 
3291 	pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3292 
3293 	pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3294 	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3295 		ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3296 
3297 	pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3298 		PPSMC_DISPLAY_WATERMARK_HIGH;
3299 
3300 	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
3301 				   (u8 *)levels, level_array_size,
3302 				   pi->sram_end);
3303 	if (ret)
3304 		return ret;
3305 
3306 	return 0;
3307 }
3308 
3309 static void ci_reset_single_dpm_table(struct radeon_device *rdev,
3310 				      struct ci_single_dpm_table *dpm_table,
3311 				      u32 count)
3312 {
3313 	u32 i;
3314 
3315 	dpm_table->count = count;
3316 	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3317 		dpm_table->dpm_levels[i].enabled = false;
3318 }
3319 
3320 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
3321 				      u32 index, u32 pcie_gen, u32 pcie_lanes)
3322 {
3323 	dpm_table->dpm_levels[index].value = pcie_gen;
3324 	dpm_table->dpm_levels[index].param1 = pcie_lanes;
3325 	dpm_table->dpm_levels[index].enabled = true;
3326 }
3327 
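/*
 * Build the fixed six-entry PCIe DPM table from the min/max gen and lane
 * settings of the performance and powersaving ranges.  If only one of
 * the two ranges is valid, it is mirrored into the other.
 */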
3328 static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
3329 {
3330 	struct ci_power_info *pi = ci_get_pi(rdev);
3331 
3332 	if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3333 		return -EINVAL;
3334 
3335 	if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3336 		pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3337 		pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3338 	} else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3339 		pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3340 		pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3341 	}
3342 
3343 	ci_reset_single_dpm_table(rdev,
3344 				  &pi->dpm_table.pcie_speed_table,
3345 				  SMU7_MAX_LEVELS_LINK);
3346 
3347 	if (rdev->family == CHIP_BONAIRE)
3348 		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3349 					  pi->pcie_gen_powersaving.min,
3350 					  pi->pcie_lane_powersaving.max);
3351 	else
3352 		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3353 					  pi->pcie_gen_powersaving.min,
3354 					  pi->pcie_lane_powersaving.min);
3355 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3356 				  pi->pcie_gen_performance.min,
3357 				  pi->pcie_lane_performance.min);
3358 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3359 				  pi->pcie_gen_powersaving.min,
3360 				  pi->pcie_lane_powersaving.max);
3361 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3362 				  pi->pcie_gen_performance.min,
3363 				  pi->pcie_lane_performance.max);
3364 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3365 				  pi->pcie_gen_powersaving.max,
3366 				  pi->pcie_lane_powersaving.max);
3367 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3368 				  pi->pcie_gen_performance.max,
3369 				  pi->pcie_lane_performance.max);
3370 
3371 	pi->dpm_table.pcie_speed_table.count = 6;
3372 
3373 	return 0;
3374 }
3375 
3376 static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
3377 {
3378 	struct ci_power_info *pi = ci_get_pi(rdev);
3379 	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3380 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3381 	struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
3382 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3383 	struct radeon_cac_leakage_table *std_voltage_table =
3384 		&rdev->pm.dpm.dyn_state.cac_leakage_table;
3385 	u32 i;
3386 
3387 	if (allowed_sclk_vddc_table == NULL)
3388 		return -EINVAL;
3389 	if (allowed_sclk_vddc_table->count < 1)
3390 		return -EINVAL;
3391 	if (allowed_mclk_table == NULL)
3392 		return -EINVAL;
3393 	if (allowed_mclk_table->count < 1)
3394 		return -EINVAL;
3395 
3396 	memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3397 
3398 	ci_reset_single_dpm_table(rdev,
3399 				  &pi->dpm_table.sclk_table,
3400 				  SMU7_MAX_LEVELS_GRAPHICS);
3401 	ci_reset_single_dpm_table(rdev,
3402 				  &pi->dpm_table.mclk_table,
3403 				  SMU7_MAX_LEVELS_MEMORY);
3404 	ci_reset_single_dpm_table(rdev,
3405 				  &pi->dpm_table.vddc_table,
3406 				  SMU7_MAX_LEVELS_VDDC);
3407 	ci_reset_single_dpm_table(rdev,
3408 				  &pi->dpm_table.vddci_table,
3409 				  SMU7_MAX_LEVELS_VDDCI);
3410 	ci_reset_single_dpm_table(rdev,
3411 				  &pi->dpm_table.mvdd_table,
3412 				  SMU7_MAX_LEVELS_MVDD);
3413 
3414 	pi->dpm_table.sclk_table.count = 0;
3415 	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3416 		if ((i == 0) ||
3417 		    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3418 		     allowed_sclk_vddc_table->entries[i].clk)) {
3419 			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3420 				allowed_sclk_vddc_table->entries[i].clk;
3421 			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3422 				(i == 0) ? true : false;
3423 			pi->dpm_table.sclk_table.count++;
3424 		}
3425 	}
3426 
3427 	pi->dpm_table.mclk_table.count = 0;
3428 	for (i = 0; i < allowed_mclk_table->count; i++) {
3429 		if ((i == 0) ||
3430 		    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3431 		     allowed_mclk_table->entries[i].clk)) {
3432 			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3433 				allowed_mclk_table->entries[i].clk;
3434 			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3435 				(i == 0) ? true : false;
3436 			pi->dpm_table.mclk_table.count++;
3437 		}
3438 	}
3439 
3440 	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3441 		pi->dpm_table.vddc_table.dpm_levels[i].value =
3442 			allowed_sclk_vddc_table->entries[i].v;
3443 		pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3444 			std_voltage_table->entries[i].leakage;
3445 		pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3446 	}
3447 	pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3448 
3449 	allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3450 	if (allowed_mclk_table) {
3451 		for (i = 0; i < allowed_mclk_table->count; i++) {
3452 			pi->dpm_table.vddci_table.dpm_levels[i].value =
3453 				allowed_mclk_table->entries[i].v;
3454 			pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3455 		}
3456 		pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3457 	}
3458 
3459 	allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3460 	if (allowed_mclk_table) {
3461 		for (i = 0; i < allowed_mclk_table->count; i++) {
3462 			pi->dpm_table.mvdd_table.dpm_levels[i].value =
3463 				allowed_mclk_table->entries[i].v;
3464 			pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3465 		}
3466 		pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3467 	}
3468 
3469 	ci_setup_default_pcie_tables(rdev);
3470 
3471 	return 0;
3472 }
3473 
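/*
 * Return in @boot_level the index of the last DPM level whose clock
 * matches @value; returns -EINVAL if no level matches.
 */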
3474 static int ci_find_boot_level(struct ci_single_dpm_table *table,
3475 			      u32 value, u32 *boot_level)
3476 {
3477 	u32 i;
3478 	int ret = -EINVAL;
3479 
3480 	for (i = 0; i < table->count; i++) {
3481 		if (value == table->dpm_levels[i].value) {
3482 			*boot_level = i;
3483 			ret = 0;
3484 		}
3485 	}
3486 
3487 	return ret;
3488 }
3489 
3490 static int ci_init_smc_table(struct radeon_device *rdev)
3491 {
3492 	struct ci_power_info *pi = ci_get_pi(rdev);
3493 	struct ci_ulv_parm *ulv = &pi->ulv;
3494 	struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
3495 	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3496 	int ret;
3497 
3498 	ret = ci_setup_default_dpm_tables(rdev);
3499 	if (ret)
3500 		return ret;
3501 
3502 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3503 		ci_populate_smc_voltage_tables(rdev, table);
3504 
3505 	ci_init_fps_limits(rdev);
3506 
3507 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3508 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3509 
3510 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3511 		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3512 
3513 	if (pi->mem_gddr5)
3514 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3515 
3516 	if (ulv->supported) {
3517 		ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
3518 		if (ret)
3519 			return ret;
3520 		WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3521 	}
3522 
3523 	ret = ci_populate_all_graphic_levels(rdev);
3524 	if (ret)
3525 		return ret;
3526 
3527 	ret = ci_populate_all_memory_levels(rdev);
3528 	if (ret)
3529 		return ret;
3530 
3531 	ci_populate_smc_link_level(rdev, table);
3532 
3533 	ret = ci_populate_smc_acpi_level(rdev, table);
3534 	if (ret)
3535 		return ret;
3536 
3537 	ret = ci_populate_smc_vce_level(rdev, table);
3538 	if (ret)
3539 		return ret;
3540 
3541 	ret = ci_populate_smc_acp_level(rdev, table);
3542 	if (ret)
3543 		return ret;
3544 
3545 	ret = ci_populate_smc_samu_level(rdev, table);
3546 	if (ret)
3547 		return ret;
3548 
3549 	ret = ci_do_program_memory_timing_parameters(rdev);
3550 	if (ret)
3551 		return ret;
3552 
3553 	ret = ci_populate_smc_uvd_level(rdev, table);
3554 	if (ret)
3555 		return ret;
3556 
3557 	table->UvdBootLevel  = 0;
3558 	table->VceBootLevel  = 0;
3559 	table->AcpBootLevel  = 0;
3560 	table->SamuBootLevel  = 0;
3561 	table->GraphicsBootLevel  = 0;
3562 	table->MemoryBootLevel  = 0;
3563 
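	/*
	 * FIXME: GraphicsBootLevel/MemoryBootLevel are u8 fields, so the
	 * (u32 *) casts below store a full 32-bit value through them, and
	 * the return codes are ignored; kept as-is, flagged for review.
	 */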
3564 	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3565 				 pi->vbios_boot_state.sclk_bootup_value,
3566 				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3567 
3568 	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3569 				 pi->vbios_boot_state.mclk_bootup_value,
3570 				 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3571 
3572 	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3573 	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3574 	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3575 
3576 	ci_populate_smc_initial_state(rdev, radeon_boot_state);
3577 
3578 	ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
3579 	if (ret)
3580 		return ret;
3581 
3582 	table->UVDInterval = 1;
3583 	table->VCEInterval = 1;
3584 	table->ACPInterval = 1;
3585 	table->SAMUInterval = 1;
3586 	table->GraphicsVoltageChangeEnable = 1;
3587 	table->GraphicsThermThrottleEnable = 1;
3588 	table->GraphicsInterval = 1;
3589 	table->VoltageInterval = 1;
3590 	table->ThermalInterval = 1;
3591 	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3592 					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3593 	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3594 					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3595 	table->MemoryVoltageChangeEnable = 1;
3596 	table->MemoryInterval = 1;
3597 	table->VoltageResponseTime = 0;
3598 	table->VddcVddciDelta = 4000;
3599 	table->PhaseResponseTime = 0;
3600 	table->MemoryThermThrottleEnable = 1;
3601 	table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3602 	table->PCIeGenInterval = 1;
3603 	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3604 		table->SVI2Enable  = 1;
3605 	else
3606 		table->SVI2Enable  = 0;
3607 
3608 	table->ThermGpio = 17;
3609 	table->SclkStepSize = 0x4000;
3610 
3611 	table->SystemFlags = cpu_to_be32(table->SystemFlags);
3612 	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3613 	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3614 	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3615 	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3616 	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3617 	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3618 	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3619 	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3620 	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3621 	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3622 	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3623 	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3624 	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3625 
3626 	ret = ci_copy_bytes_to_smc(rdev,
3627 				   pi->dpm_table_start +
3628 				   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3629 				   (u8 *)&table->SystemFlags,
3630 				   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3631 				   pi->sram_end);
3632 	if (ret)
3633 		return ret;
3634 
3635 	return 0;
3636 }
3637 
3638 static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3639 				      struct ci_single_dpm_table *dpm_table,
3640 				      u32 low_limit, u32 high_limit)
3641 {
3642 	u32 i;
3643 
3644 	for (i = 0; i < dpm_table->count; i++) {
3645 		if ((dpm_table->dpm_levels[i].value < low_limit) ||
3646 		    (dpm_table->dpm_levels[i].value > high_limit))
3647 			dpm_table->dpm_levels[i].enabled = false;
3648 		else
3649 			dpm_table->dpm_levels[i].enabled = true;
3650 	}
3651 }
3652 
3653 static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3654 				    u32 speed_low, u32 lanes_low,
3655 				    u32 speed_high, u32 lanes_high)
3656 {
3657 	struct ci_power_info *pi = ci_get_pi(rdev);
3658 	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3659 	u32 i, j;
3660 
3661 	for (i = 0; i < pcie_table->count; i++) {
3662 		if ((pcie_table->dpm_levels[i].value < speed_low) ||
3663 		    (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3664 		    (pcie_table->dpm_levels[i].value > speed_high) ||
3665 		    (pcie_table->dpm_levels[i].param1 > lanes_high))
3666 			pcie_table->dpm_levels[i].enabled = false;
3667 		else
3668 			pcie_table->dpm_levels[i].enabled = true;
3669 	}
3670 
3671 	for (i = 0; i < pcie_table->count; i++) {
3672 		if (pcie_table->dpm_levels[i].enabled) {
3673 			for (j = i + 1; j < pcie_table->count; j++) {
3674 				if (pcie_table->dpm_levels[j].enabled) {
3675 					if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3676 					    (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3677 						pcie_table->dpm_levels[j].enabled = false;
3678 				}
3679 			}
3680 		}
3681 	}
3682 }
3683 
3684 static int ci_trim_dpm_states(struct radeon_device *rdev,
3685 			      struct radeon_ps *radeon_state)
3686 {
3687 	struct ci_ps *state = ci_get_ps(radeon_state);
3688 	struct ci_power_info *pi = ci_get_pi(rdev);
3689 	u32 high_limit_count;
3690 
3691 	if (state->performance_level_count < 1)
3692 		return -EINVAL;
3693 
3694 	if (state->performance_level_count == 1)
3695 		high_limit_count = 0;
3696 	else
3697 		high_limit_count = 1;
3698 
3699 	ci_trim_single_dpm_states(rdev,
3700 				  &pi->dpm_table.sclk_table,
3701 				  state->performance_levels[0].sclk,
3702 				  state->performance_levels[high_limit_count].sclk);
3703 
3704 	ci_trim_single_dpm_states(rdev,
3705 				  &pi->dpm_table.mclk_table,
3706 				  state->performance_levels[0].mclk,
3707 				  state->performance_levels[high_limit_count].mclk);
3708 
3709 	ci_trim_pcie_dpm_states(rdev,
3710 				state->performance_levels[0].pcie_gen,
3711 				state->performance_levels[0].pcie_lane,
3712 				state->performance_levels[high_limit_count].pcie_gen,
3713 				state->performance_levels[high_limit_count].pcie_lane);
3714 
3715 	return 0;
3716 }
3717 
3718 static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
3719 {
3720 	struct radeon_clock_voltage_dependency_table *disp_voltage_table =
3721 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3722 	struct radeon_clock_voltage_dependency_table *vddc_table =
3723 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3724 	u32 requested_voltage = 0;
3725 	u32 i;
3726 
3727 	if (disp_voltage_table == NULL)
3728 		return -EINVAL;
3729 	if (!disp_voltage_table->count)
3730 		return -EINVAL;
3731 
3732 	for (i = 0; i < disp_voltage_table->count; i++) {
3733 		if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3734 			requested_voltage = disp_voltage_table->entries[i].v;
3735 	}
3736 
3737 	for (i = 0; i < vddc_table->count; i++) {
3738 		if (requested_voltage <= vddc_table->entries[i].v) {
3739 			requested_voltage = vddc_table->entries[i].v;
3740 			return (ci_send_msg_to_smc_with_parameter(rdev,
3741 								  PPSMC_MSG_VddC_Request,
3742 								  requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3743 				0 : -EINVAL;
3744 		}
3745 	}
3746 
3747 	return -EINVAL;
3748 }
3749 
3750 static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3751 {
3752 	struct ci_power_info *pi = ci_get_pi(rdev);
3753 	PPSMC_Result result;
3754 
3755 	ci_apply_disp_minimum_voltage_request(rdev);
3756 
3757 	if (!pi->sclk_dpm_key_disabled) {
3758 		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3759 			result = ci_send_msg_to_smc_with_parameter(rdev,
3760 								   PPSMC_MSG_SCLKDPM_SetEnabledMask,
3761 								   pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3762 			if (result != PPSMC_Result_OK)
3763 				return -EINVAL;
3764 		}
3765 	}
3766 
3767 	if (!pi->mclk_dpm_key_disabled) {
3768 		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3769 			result = ci_send_msg_to_smc_with_parameter(rdev,
3770 								   PPSMC_MSG_MCLKDPM_SetEnabledMask,
3771 								   pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3772 			if (result != PPSMC_Result_OK)
3773 				return -EINVAL;
3774 		}
3775 	}
3776 #if 0
3777 	if (!pi->pcie_dpm_key_disabled) {
3778 		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3779 			result = ci_send_msg_to_smc_with_parameter(rdev,
3780 								   PPSMC_MSG_PCIeDPM_SetEnabledMask,
3781 								   pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3782 			if (result != PPSMC_Result_OK)
3783 				return -EINVAL;
3784 		}
3785 	}
3786 #endif
3787 	return 0;
3788 }
3789 
3790 static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
3791 						   struct radeon_ps *radeon_state)
3792 {
3793 	struct ci_power_info *pi = ci_get_pi(rdev);
3794 	struct ci_ps *state = ci_get_ps(radeon_state);
3795 	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3796 	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3797 	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3798 	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3799 	u32 i;
3800 
3801 	pi->need_update_smu7_dpm_table = 0;
3802 
3803 	for (i = 0; i < sclk_table->count; i++) {
3804 		if (sclk == sclk_table->dpm_levels[i].value)
3805 			break;
3806 	}
3807 
3808 	if (i >= sclk_table->count) {
3809 		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3810 	} else {
3811 		/* XXX check display min clock requirements */
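		/* always false by construction; placeholder until a real
		 * display minimum clock check is implemented */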
3812 		if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
3813 			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3814 	}
3815 
3816 	for (i = 0; i < mclk_table->count; i++) {
3817 		if (mclk == mclk_table->dpm_levels[i].value)
3818 			break;
3819 	}
3820 
3821 	if (i >= mclk_table->count)
3822 		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3823 
3824 	if (rdev->pm.dpm.current_active_crtc_count !=
3825 	    rdev->pm.dpm.new_active_crtc_count)
3826 		pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3827 }
3828 
3829 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
3830 						       struct radeon_ps *radeon_state)
3831 {
3832 	struct ci_power_info *pi = ci_get_pi(rdev);
3833 	struct ci_ps *state = ci_get_ps(radeon_state);
3834 	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3835 	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3836 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
3837 	int ret;
3838 
3839 	if (!pi->need_update_smu7_dpm_table)
3840 		return 0;
3841 
3842 	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
3843 		dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
3844 
3845 	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
3846 		dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
3847 
3848 	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
3849 		ret = ci_populate_all_graphic_levels(rdev);
3850 		if (ret)
3851 			return ret;
3852 	}
3853 
3854 	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3855 		ret = ci_populate_all_memory_levels(rdev);
3856 		if (ret)
3857 			return ret;
3858 	}
3859 
3860 	return 0;
3861 }
3862 
3863 static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
3864 {
3865 	struct ci_power_info *pi = ci_get_pi(rdev);
3866 	const struct radeon_clock_and_voltage_limits *max_limits;
3867 	int i;
3868 
3869 	if (rdev->pm.dpm.ac_power)
3870 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3871 	else
3872 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3873 
3874 	if (enable) {
3875 		pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
3876 
3877 		for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3878 			if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3879 				pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
3880 
3881 				if (!pi->caps_uvd_dpm)
3882 					break;
3883 			}
3884 		}
3885 
3886 		ci_send_msg_to_smc_with_parameter(rdev,
3887 						  PPSMC_MSG_UVDDPM_SetEnabledMask,
3888 						  pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
3889 
3890 		if (pi->last_mclk_dpm_enable_mask & 0x1) {
3891 			pi->uvd_enabled = true;
3892 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3893 			ci_send_msg_to_smc_with_parameter(rdev,
3894 							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
3895 							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3896 		}
3897 	} else {
3898 		if (pi->last_mclk_dpm_enable_mask & 0x1) {
3899 			pi->uvd_enabled = false;
3900 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
3901 			ci_send_msg_to_smc_with_parameter(rdev,
3902 							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
3903 							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3904 		}
3905 	}
3906 
3907 	return (ci_send_msg_to_smc(rdev, enable ?
3908 				   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
3909 		0 : -EINVAL;
3910 }
3911 
3912 static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
3913 {
3914 	struct ci_power_info *pi = ci_get_pi(rdev);
3915 	const struct radeon_clock_and_voltage_limits *max_limits;
3916 	int i;
3917 
3918 	if (rdev->pm.dpm.ac_power)
3919 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3920 	else
3921 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3922 
3923 	if (enable) {
3924 		pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
3925 		for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3926 			if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3927 				pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
3928 
3929 				if (!pi->caps_vce_dpm)
3930 					break;
3931 			}
3932 		}
3933 
3934 		ci_send_msg_to_smc_with_parameter(rdev,
3935 						  PPSMC_MSG_VCEDPM_SetEnabledMask,
3936 						  pi->dpm_level_enable_mask.vce_dpm_enable_mask);
3937 	}
3938 
3939 	return (ci_send_msg_to_smc(rdev, enable ?
3940 				   PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
3941 		0 : -EINVAL;
3942 }
3943 
3944 #if 0
3945 static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
3946 {
3947 	struct ci_power_info *pi = ci_get_pi(rdev);
3948 	const struct radeon_clock_and_voltage_limits *max_limits;
3949 	int i;
3950 
3951 	if (rdev->pm.dpm.ac_power)
3952 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3953 	else
3954 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3955 
3956 	if (enable) {
3957 		pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
3958 		for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3959 			if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3960 				pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
3961 
3962 				if (!pi->caps_samu_dpm)
3963 					break;
3964 			}
3965 		}
3966 
3967 		ci_send_msg_to_smc_with_parameter(rdev,
3968 						  PPSMC_MSG_SAMUDPM_SetEnabledMask,
3969 						  pi->dpm_level_enable_mask.samu_dpm_enable_mask);
3970 	}
3971 	return (ci_send_msg_to_smc(rdev, enable ?
3972 				   PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
3973 		0 : -EINVAL;
3974 }
3975 
3976 static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
3977 {
3978 	struct ci_power_info *pi = ci_get_pi(rdev);
3979 	const struct radeon_clock_and_voltage_limits *max_limits;
3980 	int i;
3981 
3982 	if (rdev->pm.dpm.ac_power)
3983 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3984 	else
3985 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3986 
3987 	if (enable) {
3988 		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
3989 		for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3990 			if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3991 				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
3992 
3993 				if (!pi->caps_acp_dpm)
3994 					break;
3995 			}
3996 		}
3997 
3998 		ci_send_msg_to_smc_with_parameter(rdev,
3999 						  PPSMC_MSG_ACPDPM_SetEnabledMask,
4000 						  pi->dpm_level_enable_mask.acp_dpm_enable_mask);
4001 	}
4002 
4003 	return (ci_send_msg_to_smc(rdev, enable ?
4004 				   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
4005 		0 : -EINVAL;
4006 }
4007 #endif
4008 
4009 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
4010 {
4011 	struct ci_power_info *pi = ci_get_pi(rdev);
4012 	u32 tmp;
4013 
4014 	if (!gate) {
4015 		if (pi->caps_uvd_dpm ||
4016 		    (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4017 			pi->smc_state_table.UvdBootLevel = 0;
4018 		else
4019 			pi->smc_state_table.UvdBootLevel =
4020 				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4021 
4022 		tmp = RREG32_SMC(DPM_TABLE_475);
4023 		tmp &= ~UvdBootLevel_MASK;
4024 		tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
4025 		WREG32_SMC(DPM_TABLE_475, tmp);
4026 	}
4027 
4028 	return ci_enable_uvd_dpm(rdev, !gate);
4029 }
4030 
4031 static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
4032 {
4033 	u8 i;
4034 	u32 min_evclk = 30000; /* XXX: minimum evclk for VCE encode; origin of this value is unclear */
4035 	struct radeon_vce_clock_voltage_dependency_table *table =
4036 		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4037 
4038 	for (i = 0; i < table->count; i++) {
4039 		if (table->entries[i].evclk >= min_evclk)
4040 			return i;
4041 	}
4042 
4043 	return table->count - 1;
4044 }
4045 
4046 static int ci_update_vce_dpm(struct radeon_device *rdev,
4047 			     struct radeon_ps *radeon_new_state,
4048 			     struct radeon_ps *radeon_current_state)
4049 {
4050 	struct ci_power_info *pi = ci_get_pi(rdev);
4051 	int ret = 0;
4052 	u32 tmp;
4053 
4054 	if (radeon_current_state->evclk != radeon_new_state->evclk) {
4055 		if (radeon_new_state->evclk) {
4056 			/* turn the clocks on when encoding */
4057 			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
4058 
4059 			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
4060 			tmp = RREG32_SMC(DPM_TABLE_475);
4061 			tmp &= ~VceBootLevel_MASK;
4062 			tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
4063 			WREG32_SMC(DPM_TABLE_475, tmp);
4064 
4065 			ret = ci_enable_vce_dpm(rdev, true);
4066 		} else {
4067 			/* turn the clocks off when not encoding */
4068 			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
4069 
4070 			ret = ci_enable_vce_dpm(rdev, false);
4071 		}
4072 	}
4073 	return ret;
4074 }
4075 
4076 #if 0
4077 static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
4078 {
4079 	return ci_enable_samu_dpm(rdev, gate);
4080 }
4081 
4082 static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
4083 {
4084 	struct ci_power_info *pi = ci_get_pi(rdev);
4085 	u32 tmp;
4086 
4087 	if (!gate) {
4088 		pi->smc_state_table.AcpBootLevel = 0;
4089 
4090 		tmp = RREG32_SMC(DPM_TABLE_475);
4091 		tmp &= ~AcpBootLevel_MASK;
4092 		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
4093 		WREG32_SMC(DPM_TABLE_475, tmp);
4094 	}
4095 
4096 	return ci_enable_acp_dpm(rdev, !gate);
4097 }
4098 #endif
4099 
4100 static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
4101 					     struct radeon_ps *radeon_state)
4102 {
4103 	struct ci_power_info *pi = ci_get_pi(rdev);
4104 	int ret;
4105 
4106 	ret = ci_trim_dpm_states(rdev, radeon_state);
4107 	if (ret)
4108 		return ret;
4109 
4110 	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4111 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4112 	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4113 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4114 	pi->last_mclk_dpm_enable_mask =
4115 		pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4116 	if (pi->uvd_enabled) {
4117 		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4118 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4119 	}
4120 	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4121 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4122 
4123 	return 0;
4124 }
4125 
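/*
 * Return the position of the lowest set bit in @level_mask.  The caller
 * must guarantee a non-zero mask, otherwise this loop never terminates.
 */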
4126 static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
4127 				       u32 level_mask)
4128 {
4129 	u32 level = 0;
4130 
4131 	while ((level_mask & (1 << level)) == 0)
4132 		level++;
4133 
4134 	return level;
4135 }
4136 
4137 
4138 int ci_dpm_force_performance_level(struct radeon_device *rdev,
4139 				   enum radeon_dpm_forced_level level)
4140 {
4141 	struct ci_power_info *pi = ci_get_pi(rdev);
4142 	u32 tmp, levels, i;
4143 	int ret;
4144 
4145 	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
4146 		if ((!pi->pcie_dpm_key_disabled) &&
4147 		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4148 			levels = 0;
4149 			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4150 			while (tmp >>= 1)
4151 				levels++;
4152 			if (levels) {
4153 				ret = ci_dpm_force_state_pcie(rdev, levels);
4154 				if (ret)
4155 					return ret;
4156 				for (i = 0; i < rdev->usec_timeout; i++) {
4157 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
4158 					       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
4159 					if (tmp == levels)
4160 						break;
4161 					udelay(1);
4162 				}
4163 			}
4164 		}
4165 		if ((!pi->sclk_dpm_key_disabled) &&
4166 		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4167 			levels = 0;
4168 			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4169 			while (tmp >>= 1)
4170 				levels++;
4171 			if (levels) {
4172 				ret = ci_dpm_force_state_sclk(rdev, levels);
4173 				if (ret)
4174 					return ret;
4175 				for (i = 0; i < rdev->usec_timeout; i++) {
4176 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4177 					       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
4178 					if (tmp == levels)
4179 						break;
4180 					udelay(1);
4181 				}
4182 			}
4183 		}
4184 		if ((!pi->mclk_dpm_key_disabled) &&
4185 		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4186 			levels = 0;
4187 			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4188 			while (tmp >>= 1)
4189 				levels++;
4190 			if (levels) {
4191 				ret = ci_dpm_force_state_mclk(rdev, levels);
4192 				if (ret)
4193 					return ret;
4194 				for (i = 0; i < rdev->usec_timeout; i++) {
4195 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4196 					       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
4197 					if (tmp == levels)
4198 						break;
4199 					udelay(1);
4200 				}
4201 			}
4202 		}
4203 	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
4204 		if ((!pi->sclk_dpm_key_disabled) &&
4205 		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4206 			levels = ci_get_lowest_enabled_level(rdev,
4207 							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4208 			ret = ci_dpm_force_state_sclk(rdev, levels);
4209 			if (ret)
4210 				return ret;
4211 			for (i = 0; i < rdev->usec_timeout; i++) {
4212 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4213 				       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
4214 				if (tmp == levels)
4215 					break;
4216 				udelay(1);
4217 			}
4218 		}
4219 		if ((!pi->mclk_dpm_key_disabled) &&
4220 		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4221 			levels = ci_get_lowest_enabled_level(rdev,
4222 							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4223 			ret = ci_dpm_force_state_mclk(rdev, levels);
4224 			if (ret)
4225 				return ret;
4226 			for (i = 0; i < rdev->usec_timeout; i++) {
4227 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4228 				       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
4229 				if (tmp == levels)
4230 					break;
4231 				udelay(1);
4232 			}
4233 		}
4234 		if ((!pi->pcie_dpm_key_disabled) &&
4235 		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4236 			levels = ci_get_lowest_enabled_level(rdev,
4237 							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4238 			ret = ci_dpm_force_state_pcie(rdev, levels);
4239 			if (ret)
4240 				return ret;
4241 			for (i = 0; i < rdev->usec_timeout; i++) {
4242 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
4243 				       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
4244 				if (tmp == levels)
4245 					break;
4246 				udelay(1);
4247 			}
4248 		}
4249 	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
4250 		if (!pi->pcie_dpm_key_disabled) {
4251 			PPSMC_Result smc_result;
4252 
4253 			smc_result = ci_send_msg_to_smc(rdev,
4254 							PPSMC_MSG_PCIeDPM_UnForceLevel);
4255 			if (smc_result != PPSMC_Result_OK)
4256 				return -EINVAL;
4257 		}
4258 		ret = ci_upload_dpm_level_enable_mask(rdev);
4259 		if (ret)
4260 			return ret;
4261 	}
4262 
4263 	rdev->pm.dpm.forced_level = level;
4264 
4265 	return 0;
4266 }
4267 
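/*
 * For each MC_SEQ_MISC1/MC_SEQ_RESERVE_M source register, append the
 * derived MC_PMG_CMD_* shadow entries (EMRS, MRS, MRS1, and
 * MC_PMG_AUTO_CMD for non-GDDR5) after the existing registers,
 * extending table->last.
 */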
4268 static int ci_set_mc_special_registers(struct radeon_device *rdev,
4269 				       struct ci_mc_reg_table *table)
4270 {
4271 	struct ci_power_info *pi = ci_get_pi(rdev);
4272 	u8 i, j, k;
4273 	u32 temp_reg;
4274 
4275 	for (i = 0, j = table->last; i < table->last; i++) {
4276 		if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4277 			return -EINVAL;
4278 		switch (table->mc_reg_address[i].s1 << 2) {
4279 		case MC_SEQ_MISC1:
4280 			temp_reg = RREG32(MC_PMG_CMD_EMRS);
4281 			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
4282 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
4283 			for (k = 0; k < table->num_entries; k++) {
4284 				table->mc_reg_table_entry[k].mc_data[j] =
4285 					((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4286 			}
4287 			j++;
4288 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4289 				return -EINVAL;
4290 
4291 			temp_reg = RREG32(MC_PMG_CMD_MRS);
4292 			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
4293 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
4294 			for (k = 0; k < table->num_entries; k++) {
4295 				table->mc_reg_table_entry[k].mc_data[j] =
4296 					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4297 				if (!pi->mem_gddr5)
4298 					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4299 			}
4300 			j++;
4301 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4302 				return -EINVAL;
4303 
4304 			if (!pi->mem_gddr5) {
4305 				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
4306 				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
4307 				for (k = 0; k < table->num_entries; k++) {
4308 					table->mc_reg_table_entry[k].mc_data[j] =
4309 						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4310 				}
4311 				j++;
4312 				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4313 					return -EINVAL;
4314 			}
4315 			break;
4316 		case MC_SEQ_RESERVE_M:
4317 			temp_reg = RREG32(MC_PMG_CMD_MRS1);
4318 			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
4319 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
4320 			for (k = 0; k < table->num_entries; k++) {
4321 				table->mc_reg_table_entry[k].mc_data[j] =
4322 					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4323 			}
4324 			j++;
4325 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4326 				return -EINVAL;
4327 			break;
4328 		default:
4329 			break;
4330 		}
4331 
4332 	}
4333 
4334 	table->last = j;
4335 
4336 	return 0;
4337 }
4338 
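/*
 * Map an MC register (stored as a dword offset, i.e. byte offset >> 2)
 * to its _LP shadow register; returns false if there is no shadow.
 */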
4339 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4340 {
4341 	bool result = true;
4342 
4343 	switch (in_reg) {
4344 	case MC_SEQ_RAS_TIMING >> 2:
4345 		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
4346 		break;
4347 	case MC_SEQ_DLL_STBY >> 2:
4348 		*out_reg = MC_SEQ_DLL_STBY_LP >> 2;
4349 		break;
4350 	case MC_SEQ_G5PDX_CMD0 >> 2:
4351 		*out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
4352 		break;
4353 	case MC_SEQ_G5PDX_CMD1 >> 2:
4354 		*out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
4355 		break;
4356 	case MC_SEQ_G5PDX_CTRL >> 2:
4357 		*out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
4358 		break;
4359 	case MC_SEQ_CAS_TIMING >> 2:
4360 		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
4361 		break;
4362 	case MC_SEQ_MISC_TIMING >> 2:
4363 		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
4364 		break;
4365 	case MC_SEQ_MISC_TIMING2 >> 2:
4366 		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
4367 		break;
4368 	case MC_SEQ_PMG_DVS_CMD >> 2:
4369 		*out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
4370 		break;
4371 	case MC_SEQ_PMG_DVS_CTL >> 2:
4372 		*out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
4373 		break;
4374 	case MC_SEQ_RD_CTL_D0 >> 2:
4375 		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
4376 		break;
4377 	case MC_SEQ_RD_CTL_D1 >> 2:
4378 		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
4379 		break;
4380 	case MC_SEQ_WR_CTL_D0 >> 2:
4381 		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
4382 		break;
4383 	case MC_SEQ_WR_CTL_D1 >> 2:
4384 		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
4385 		break;
4386 	case MC_PMG_CMD_EMRS >> 2:
4387 		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
4388 		break;
4389 	case MC_PMG_CMD_MRS >> 2:
4390 		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
4391 		break;
4392 	case MC_PMG_CMD_MRS1 >> 2:
4393 		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
4394 		break;
4395 	case MC_SEQ_PMG_TIMING >> 2:
4396 		*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
4397 		break;
4398 	case MC_PMG_CMD_MRS2 >> 2:
4399 		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
4400 		break;
4401 	case MC_SEQ_WR_CTL_2 >> 2:
4402 		*out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
4403 		break;
4404 	default:
4405 		result = false;
4406 		break;
4407 	}
4408 
4409 	return result;
4410 }
4411 
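/*
 * Set bit i of valid_flag for every register whose value differs
 * between adjacent entries, i.e. registers that vary with memory clock.
 */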
4412 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4413 {
4414 	u8 i, j;
4415 
4416 	for (i = 0; i < table->last; i++) {
4417 		for (j = 1; j < table->num_entries; j++) {
4418 			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4419 			    table->mc_reg_table_entry[j].mc_data[i]) {
4420 				table->valid_flag |= 1 << i;
4421 				break;
4422 			}
4423 		}
4424 	}
4425 }
4426 
4427 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4428 {
4429 	u32 i;
4430 	u16 address;
4431 
4432 	for (i = 0; i < table->last; i++) {
4433 		table->mc_reg_address[i].s0 =
4434 			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4435 			address : table->mc_reg_address[i].s1;
4436 	}
4437 }
4438 
4439 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4440 				      struct ci_mc_reg_table *ci_table)
4441 {
4442 	u8 i, j;
4443 
4444 	if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4445 		return -EINVAL;
4446 	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4447 		return -EINVAL;
4448 
4449 	for (i = 0; i < table->last; i++)
4450 		ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4451 
4452 	ci_table->last = table->last;
4453 
4454 	for (i = 0; i < table->num_entries; i++) {
4455 		ci_table->mc_reg_table_entry[i].mclk_max =
4456 			table->mc_reg_table_entry[i].mclk_max;
4457 		for (j = 0; j < table->last; j++)
4458 			ci_table->mc_reg_table_entry[i].mc_data[j] =
4459 				table->mc_reg_table_entry[i].mc_data[j];
4460 	}
4461 	ci_table->num_entries = table->num_entries;
4462 
4463 	return 0;
4464 }
4465 
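/*
 * Memory sequencer fixups for Hawaii boards (0x67B0/0x67B1) whose
 * MC_SEQ_MISC0 strap reads 0x3: patch the table entries with mclk_max
 * of 125000 or 137500 (10 kHz units).
 */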
4466 static int ci_register_patching_mc_seq(struct radeon_device *rdev,
4467 				       struct ci_mc_reg_table *table)
4468 {
4469 	u8 i, k;
4470 	u32 tmp;
4471 	bool patch;
4472 
4473 	tmp = RREG32(MC_SEQ_MISC0);
4474 	patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
4475 
4476 	if (patch &&
4477 	    ((rdev->pdev->device == 0x67B0) ||
4478 	     (rdev->pdev->device == 0x67B1))) {
4479 		for (i = 0; i < table->last; i++) {
4480 			if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4481 				return -EINVAL;
4482 			switch (table->mc_reg_address[i].s1 << 2) {
4483 			case MC_SEQ_MISC1:
4484 				for (k = 0; k < table->num_entries; k++) {
4485 					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4486 					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4487 						table->mc_reg_table_entry[k].mc_data[i] =
4488 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4489 							0x00000007;
4490 				}
4491 				break;
4492 			case MC_SEQ_WR_CTL_D0:
4493 				for (k = 0; k < table->num_entries; k++) {
4494 					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4495 					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4496 						table->mc_reg_table_entry[k].mc_data[i] =
4497 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4498 							0x0000D0DD;
4499 				}
4500 				break;
4501 			case MC_SEQ_WR_CTL_D1:
4502 				for (k = 0; k < table->num_entries; k++) {
4503 					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4504 					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4505 						table->mc_reg_table_entry[k].mc_data[i] =
4506 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4507 							0x0000D0DD;
4508 				}
4509 				break;
4510 			case MC_SEQ_WR_CTL_2:
4511 				for (k = 0; k < table->num_entries; k++) {
4512 					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4513 					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4514 						table->mc_reg_table_entry[k].mc_data[i] = 0;
4515 				}
4516 				break;
4517 			case MC_SEQ_CAS_TIMING:
4518 				for (k = 0; k < table->num_entries; k++) {
4519 					if (table->mc_reg_table_entry[k].mclk_max == 125000)
4520 						table->mc_reg_table_entry[k].mc_data[i] =
4521 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4522 							0x000C0140;
4523 					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4524 						table->mc_reg_table_entry[k].mc_data[i] =
4525 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4526 							0x000C0150;
4527 				}
4528 				break;
4529 			case MC_SEQ_MISC_TIMING:
4530 				for (k = 0; k < table->num_entries; k++) {
4531 					if (table->mc_reg_table_entry[k].mclk_max == 125000)
4532 						table->mc_reg_table_entry[k].mc_data[i] =
4533 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4534 							0x00000030;
4535 					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4536 						table->mc_reg_table_entry[k].mc_data[i] =
4537 							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4538 							0x00000035;
4539 				}
4540 				break;
4541 			default:
4542 				break;
4543 			}
4544 		}
4545 
4546 		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
4547 		tmp = RREG32(MC_SEQ_IO_DEBUG_DATA);
4548 		tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4549 		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
4550 		WREG32(MC_SEQ_IO_DEBUG_DATA, tmp);
4551 	}
4552 
4553 	return 0;
4554 }
4555 
4556 static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
4557 {
4558 	struct ci_power_info *pi = ci_get_pi(rdev);
4559 	struct atom_mc_reg_table *table;
4560 	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4561 	u8 module_index = rv770_get_memory_module_index(rdev);
4562 	int ret;
4563 
4564 	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4565 	if (!table)
4566 		return -ENOMEM;
4567 
4568 	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
4569 	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
4570 	WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
4571 	WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
4572 	WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
4573 	WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
4574 	WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
4575 	WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
4576 	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
4577 	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
4578 	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
4579 	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
4580 	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
4581 	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
4582 	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
4583 	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
4584 	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
4585 	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
4586 	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
4587 	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
4588 
4589 	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
4590 	if (ret)
4591 		goto init_mc_done;
4592 
4593 	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4594 	if (ret)
4595 		goto init_mc_done;
4596 
4597 	ci_set_s0_mc_reg_index(ci_table);
4598 
4599 	ret = ci_register_patching_mc_seq(rdev, ci_table);
4600 	if (ret)
4601 		goto init_mc_done;
4602 
4603 	ret = ci_set_mc_special_registers(rdev, ci_table);
4604 	if (ret)
4605 		goto init_mc_done;
4606 
4607 	ci_set_valid_flag(ci_table);
4608 
4609 init_mc_done:
4610 	kfree(table);
4611 
4612 	return ret;
4613 }
4614 
4615 static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
4616 					SMU7_Discrete_MCRegisters *mc_reg_table)
4617 {
4618 	struct ci_power_info *pi = ci_get_pi(rdev);
4619 	u32 i, j;
4620 
4621 	for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4622 		if (pi->mc_reg_table.valid_flag & (1 << j)) {
4623 			if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4624 				return -EINVAL;
4625 			mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4626 			mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4627 			i++;
4628 		}
4629 	}
4630 
4631 	mc_reg_table->last = (u8)i;
4632 
4633 	return 0;
4634 }
4635 
4636 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4637 				    SMU7_Discrete_MCRegisterSet *data,
4638 				    u32 num_entries, u32 valid_flag)
4639 {
4640 	u32 i, j;
4641 
4642 	for (i = 0, j = 0; j < num_entries; j++) {
4643 		if (valid_flag & (1 << j)) {
4644 			data->value[i] = cpu_to_be32(entry->mc_data[j]);
4645 			i++;
4646 		}
4647 	}
4648 }
4649 
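/*
 * Pick the first entry whose mclk_max covers @memory_clock and convert
 * it for the SMC; fall back to the last entry if the clock exceeds
 * every range.
 */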
4650 static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
4651 						 const u32 memory_clock,
4652 						 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4653 {
4654 	struct ci_power_info *pi = ci_get_pi(rdev);
4655 	u32 i;
4656 
4657 	for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
4658 		if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4659 			break;
4660 	}
4661 
4662 	if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4663 		--i;
4664 
4665 	ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4666 				mc_reg_table_data, pi->mc_reg_table.last,
4667 				pi->mc_reg_table.valid_flag);
4668 }
4669 
4670 static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
4671 					   SMU7_Discrete_MCRegisters *mc_reg_table)
4672 {
4673 	struct ci_power_info *pi = ci_get_pi(rdev);
4674 	u32 i;
4675 
4676 	for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4677 		ci_convert_mc_reg_table_entry_to_smc(rdev,
4678 						     pi->dpm_table.mclk_table.dpm_levels[i].value,
4679 						     &mc_reg_table->data[i]);
4680 }
4681 
4682 static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
4683 {
4684 	struct ci_power_info *pi = ci_get_pi(rdev);
4685 	int ret;
4686 
4687 	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4688 
4689 	ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
4690 	if (ret)
4691 		return ret;
4692 	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4693 
4694 	return ci_copy_bytes_to_smc(rdev,
4695 				    pi->mc_reg_table_start,
4696 				    (u8 *)&pi->smc_mc_reg_table,
4697 				    sizeof(SMU7_Discrete_MCRegisters),
4698 				    pi->sram_end);
4699 }
4700 
4701 static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
4702 {
4703 	struct ci_power_info *pi = ci_get_pi(rdev);
4704 
4705 	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4706 		return 0;
4707 
4708 	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4709 
4710 	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4711 
4712 	return ci_copy_bytes_to_smc(rdev,
4713 				    pi->mc_reg_table_start +
4714 				    offsetof(SMU7_Discrete_MCRegisters, data[0]),
4715 				    (u8 *)&pi->smc_mc_reg_table.data[0],
4716 				    sizeof(SMU7_Discrete_MCRegisterSet) *
4717 				    pi->dpm_table.mclk_table.count,
4718 				    pi->sram_end);
4719 }
4720 
4721 static void ci_enable_voltage_control(struct radeon_device *rdev)
4722 {
4723 	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
4724 
4725 	tmp |= VOLT_PWRMGT_EN;
4726 	WREG32_SMC(GENERAL_PWRMGT, tmp);
4727 }
4728 
4729 static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
4730 						      struct radeon_ps *radeon_state)
4731 {
4732 	struct ci_ps *state = ci_get_ps(radeon_state);
4733 	int i;
4734 	u16 pcie_speed, max_speed = 0;
4735 
4736 	for (i = 0; i < state->performance_level_count; i++) {
4737 		pcie_speed = state->performance_levels[i].pcie_gen;
4738 		if (max_speed < pcie_speed)
4739 			max_speed = pcie_speed;
4740 	}
4741 
4742 	return max_speed;
4743 }
4744 
4745 static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
4746 {
4747 	u32 speed_cntl = 0;
4748 
4749 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
4750 	speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
4751 
4752 	return (u16)speed_cntl;
4753 }
4754 
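/*
 * Decode the negotiated link width from PCIE_LC_LINK_WIDTH_CNTL into a
 * lane count; x12 is not actually supported, unknown values map to 16.
 */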
4755 static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
4756 {
4757 	u32 link_width = 0;
4758 
4759 	link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
4760 	link_width >>= LC_LINK_WIDTH_RD_SHIFT;
4761 
4762 	switch (link_width) {
4763 	case RADEON_PCIE_LC_LINK_WIDTH_X1:
4764 		return 1;
4765 	case RADEON_PCIE_LC_LINK_WIDTH_X2:
4766 		return 2;
4767 	case RADEON_PCIE_LC_LINK_WIDTH_X4:
4768 		return 4;
4769 	case RADEON_PCIE_LC_LINK_WIDTH_X8:
4770 		return 8;
4771 	case RADEON_PCIE_LC_LINK_WIDTH_X12:
4772 		/* not actually supported */
4773 		return 12;
4774 	case RADEON_PCIE_LC_LINK_WIDTH_X0:
4775 	case RADEON_PCIE_LC_LINK_WIDTH_X16:
4776 	default:
4777 		return 16;
4778 	}
4779 }
4780 
4781 static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
4782 							     struct radeon_ps *radeon_new_state,
4783 							     struct radeon_ps *radeon_current_state)
4784 {
4785 	struct ci_power_info *pi = ci_get_pi(rdev);
4786 	enum radeon_pcie_gen target_link_speed =
4787 		ci_get_maximum_link_speed(rdev, radeon_new_state);
4788 	enum radeon_pcie_gen current_link_speed;
4789 
4790 	if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
4791 		current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
4792 	else
4793 		current_link_speed = pi->force_pcie_gen;
4794 
4795 	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
4796 	pi->pspp_notify_required = false;
4797 	if (target_link_speed > current_link_speed) {
4798 		switch (target_link_speed) {
4799 #ifdef CONFIG_ACPI
4800 		case RADEON_PCIE_GEN3:
4801 			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
4802 				break;
4803 			pi->force_pcie_gen = RADEON_PCIE_GEN2;
4804 			if (current_link_speed == RADEON_PCIE_GEN2)
4805 				break;
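			/* fall through: GEN3 request failed, try GEN2 */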
4806 		case RADEON_PCIE_GEN2:
4807 			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
4808 				break;
4809 #endif
4810 		default:
4811 			pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
4812 			break;
4813 		}
4814 	} else {
4815 		if (target_link_speed < current_link_speed)
4816 			pi->pspp_notify_required = true;
4817 	}
4818 }
4819 
4820 static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
4821 							   struct radeon_ps *radeon_new_state,
4822 							   struct radeon_ps *radeon_current_state)
4823 {
4824 	struct ci_power_info *pi = ci_get_pi(rdev);
4825 	enum radeon_pcie_gen target_link_speed =
4826 		ci_get_maximum_link_speed(rdev, radeon_new_state);
4827 	u8 request;
4828 
4829 	if (pi->pspp_notify_required) {
4830 		if (target_link_speed == RADEON_PCIE_GEN3)
4831 			request = PCIE_PERF_REQ_PECI_GEN3;
4832 		else if (target_link_speed == RADEON_PCIE_GEN2)
4833 			request = PCIE_PERF_REQ_PECI_GEN2;
4834 		else
4835 			request = PCIE_PERF_REQ_PECI_GEN1;
4836 
4837 		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
4838 		    (ci_get_current_pcie_speed(rdev) > 0))
4839 			return;
4840 
4841 #ifdef CONFIG_ACPI
4842 		radeon_acpi_pcie_performance_request(rdev, request, false);
4843 #endif
4844 	}
4845 }
4846 
4847 static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4848 {
4849 	struct ci_power_info *pi = ci_get_pi(rdev);
4850 	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4851 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4852 	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4853 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4854 	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4855 		&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4856 
4857 	if (allowed_sclk_vddc_table == NULL)
4858 		return -EINVAL;
4859 	if (allowed_sclk_vddc_table->count < 1)
4860 		return -EINVAL;
4861 	if (allowed_mclk_vddc_table == NULL)
4862 		return -EINVAL;
4863 	if (allowed_mclk_vddc_table->count < 1)
4864 		return -EINVAL;
4865 	if (allowed_mclk_vddci_table == NULL)
4866 		return -EINVAL;
4867 	if (allowed_mclk_vddci_table->count < 1)
4868 		return -EINVAL;
4869 
4870 	pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4871 	pi->max_vddc_in_pp_table =
4872 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4873 
4874 	pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4875 	pi->max_vddci_in_pp_table =
4876 		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4877 
4878 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4879 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4880 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
4881 		allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
4882 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4883 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4884 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4885 		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4886 
4887 	return 0;
4888 }
4889 
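/*
 * If *vddc holds a leakage record ID rather than a real voltage,
 * replace it with the actual voltage from the leakage table.
 */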
4890 static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
4891 {
4892 	struct ci_power_info *pi = ci_get_pi(rdev);
4893 	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
4894 	u32 leakage_index;
4895 
4896 	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4897 		if (leakage_table->leakage_id[leakage_index] == *vddc) {
4898 			*vddc = leakage_table->actual_voltage[leakage_index];
4899 			break;
4900 		}
4901 	}
4902 }
4903 
4904 static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
4905 {
4906 	struct ci_power_info *pi = ci_get_pi(rdev);
4907 	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
4908 	u32 leakage_index;
4909 
4910 	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4911 		if (leakage_table->leakage_id[leakage_index] == *vddci) {
4912 			*vddci = leakage_table->actual_voltage[leakage_index];
4913 			break;
4914 		}
4915 	}
4916 }
4917 
4918 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4919 								      struct radeon_clock_voltage_dependency_table *table)
4920 {
4921 	u32 i;
4922 
4923 	if (table) {
4924 		for (i = 0; i < table->count; i++)
4925 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4926 	}
4927 }
4928 
4929 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
4930 								       struct radeon_clock_voltage_dependency_table *table)
4931 {
4932 	u32 i;
4933 
4934 	if (table) {
4935 		for (i = 0; i < table->count; i++)
4936 			ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
4937 	}
4938 }
4939 
4940 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4941 									  struct radeon_vce_clock_voltage_dependency_table *table)
4942 {
4943 	u32 i;
4944 
4945 	if (table) {
4946 		for (i = 0; i < table->count; i++)
4947 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4948 	}
4949 }
4950 
4951 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4952 									  struct radeon_uvd_clock_voltage_dependency_table *table)
4953 {
4954 	u32 i;
4955 
4956 	if (table) {
4957 		for (i = 0; i < table->count; i++)
4958 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4959 	}
4960 }
4961 
4962 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
4963 								   struct radeon_phase_shedding_limits_table *table)
4964 {
4965 	u32 i;
4966 
4967 	if (table) {
4968 		for (i = 0; i < table->count; i++)
4969 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
4970 	}
4971 }
4972 
4973 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
4974 							    struct radeon_clock_and_voltage_limits *table)
4975 {
4976 	if (table) {
4977 		ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
4978 		ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
4979 	}
4980 }
4981 
4982 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
4983 							 struct radeon_cac_leakage_table *table)
4984 {
4985 	u32 i;
4986 
4987 	if (table) {
4988 		for (i = 0; i < table->count; i++)
4989 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
4990 	}
4991 }
4992 
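/* Apply the leakage fixups above to every dependency/limit table we consume. */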
4993 static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
4994 {
4996 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4997 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
4998 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4999 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
5000 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5001 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
5002 	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
5003 								   &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
5004 	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5005 								      &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
5006 	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5007 								      &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
5008 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5009 								  &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
5010 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5011 								  &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
5012 	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
5013 							       &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
5014 	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
5015 							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
5016 	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
5017 							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
5018 	ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
5019 						     &rdev->pm.dpm.dyn_state.cac_leakage_table);
5021 }
5022 
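/* Detect GDDR5 vs. other vram types from the MC_SEQ_MISC0 strap bits. */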
5023 static void ci_get_memory_type(struct radeon_device *rdev)
5024 {
5025 	struct ci_power_info *pi = ci_get_pi(rdev);
5026 	u32 tmp;
5027 
5028 	tmp = RREG32(MC_SEQ_MISC0);
5029 
5030 	if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
5031 	    MC_SEQ_MISC0_GDDR5_VALUE)
5032 		pi->mem_gddr5 = true;
5033 	else
5034 		pi->mem_gddr5 = false;
5036 }
5037 
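/*
 * Keep private copies of the current/requested radeon_ps and its ci_ps
 * payload so the pointers stay valid across power-state table updates;
 * ps_priv is re-pointed at the local copy.
 */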
5038 static void ci_update_current_ps(struct radeon_device *rdev,
5039 				 struct radeon_ps *rps)
5040 {
5041 	struct ci_ps *new_ps = ci_get_ps(rps);
5042 	struct ci_power_info *pi = ci_get_pi(rdev);
5043 
5044 	pi->current_rps = *rps;
5045 	pi->current_ps = *new_ps;
5046 	pi->current_rps.ps_priv = &pi->current_ps;
5047 }
5048 
5049 static void ci_update_requested_ps(struct radeon_device *rdev,
5050 				   struct radeon_ps *rps)
5051 {
5052 	struct ci_ps *new_ps = ci_get_ps(rps);
5053 	struct ci_power_info *pi = ci_get_pi(rdev);
5054 
5055 	pi->requested_rps = *rps;
5056 	pi->requested_ps = *new_ps;
5057 	pi->requested_rps.ps_priv = &pi->requested_ps;
5058 }
5059 
5060 int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
5061 {
5062 	struct ci_power_info *pi = ci_get_pi(rdev);
5063 	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
5064 	struct radeon_ps *new_ps = &requested_ps;
5065 
5066 	ci_update_requested_ps(rdev, new_ps);
5067 
5068 	ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
5069 
5070 	return 0;
5071 }
5072 
5073 void ci_dpm_post_set_power_state(struct radeon_device *rdev)
5074 {
5075 	struct ci_power_info *pi = ci_get_pi(rdev);
5076 	struct radeon_ps *new_ps = &pi->requested_rps;
5077 
5078 	ci_update_current_ps(rdev, new_ps);
5079 }
5080 
5082 void ci_dpm_setup_asic(struct radeon_device *rdev)
5083 {
5084 	int r;
5085 
5086 	r = ci_mc_load_microcode(rdev);
5087 	if (r)
5088 		DRM_ERROR("Failed to load MC firmware!\n");
5089 	ci_read_clock_registers(rdev);
5090 	ci_get_memory_type(rdev);
5091 	ci_enable_acpi_power_management(rdev);
5092 	ci_init_sclk_t(rdev);
5093 }
5094 
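/*
 * Bring up dynamic power management: build the voltage and MC register
 * tables, upload and start the SMC firmware, then enable the individual
 * DPM features (ULV, deep sleep, DIDT, CAC, power containment, thermal
 * based sclk DPM) before recording the boot power state as current.
 */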
5095 int ci_dpm_enable(struct radeon_device *rdev)
5096 {
5097 	struct ci_power_info *pi = ci_get_pi(rdev);
5098 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
5099 	int ret;
5100 
5101 	if (ci_is_smc_running(rdev))
5102 		return -EINVAL;
5103 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
5104 		ci_enable_voltage_control(rdev);
5105 		ret = ci_construct_voltage_tables(rdev);
5106 		if (ret) {
5107 			DRM_ERROR("ci_construct_voltage_tables failed\n");
5108 			return ret;
5109 		}
5110 	}
5111 	if (pi->caps_dynamic_ac_timing) {
5112 		ret = ci_initialize_mc_reg_table(rdev);
5113 		if (ret)
5114 			pi->caps_dynamic_ac_timing = false;
5115 	}
5116 	if (pi->dynamic_ss)
5117 		ci_enable_spread_spectrum(rdev, true);
5118 	if (pi->thermal_protection)
5119 		ci_enable_thermal_protection(rdev, true);
5120 	ci_program_sstp(rdev);
5121 	ci_enable_display_gap(rdev);
5122 	ci_program_vc(rdev);
5123 	ret = ci_upload_firmware(rdev);
5124 	if (ret) {
5125 		DRM_ERROR("ci_upload_firmware failed\n");
5126 		return ret;
5127 	}
5128 	ret = ci_process_firmware_header(rdev);
5129 	if (ret) {
5130 		DRM_ERROR("ci_process_firmware_header failed\n");
5131 		return ret;
5132 	}
5133 	ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
5134 	if (ret) {
5135 		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
5136 		return ret;
5137 	}
5138 	ret = ci_init_smc_table(rdev);
5139 	if (ret) {
5140 		DRM_ERROR("ci_init_smc_table failed\n");
5141 		return ret;
5142 	}
5143 	ret = ci_init_arb_table_index(rdev);
5144 	if (ret) {
5145 		DRM_ERROR("ci_init_arb_table_index failed\n");
5146 		return ret;
5147 	}
5148 	if (pi->caps_dynamic_ac_timing) {
5149 		ret = ci_populate_initial_mc_reg_table(rdev);
5150 		if (ret) {
5151 			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
5152 			return ret;
5153 		}
5154 	}
5155 	ret = ci_populate_pm_base(rdev);
5156 	if (ret) {
5157 		DRM_ERROR("ci_populate_pm_base failed\n");
5158 		return ret;
5159 	}
5160 	ci_dpm_start_smc(rdev);
5161 	ci_enable_vr_hot_gpio_interrupt(rdev);
5162 	ret = ci_notify_smc_display_change(rdev, false);
5163 	if (ret) {
5164 		DRM_ERROR("ci_notify_smc_display_change failed\n");
5165 		return ret;
5166 	}
5167 	ci_enable_sclk_control(rdev, true);
5168 	ret = ci_enable_ulv(rdev, true);
5169 	if (ret) {
5170 		DRM_ERROR("ci_enable_ulv failed\n");
5171 		return ret;
5172 	}
5173 	ret = ci_enable_ds_master_switch(rdev, true);
5174 	if (ret) {
5175 		DRM_ERROR("ci_enable_ds_master_switch failed\n");
5176 		return ret;
5177 	}
5178 	ret = ci_start_dpm(rdev);
5179 	if (ret) {
5180 		DRM_ERROR("ci_start_dpm failed\n");
5181 		return ret;
5182 	}
5183 	ret = ci_enable_didt(rdev, true);
5184 	if (ret) {
5185 		DRM_ERROR("ci_enable_didt failed\n");
5186 		return ret;
5187 	}
5188 	ret = ci_enable_smc_cac(rdev, true);
5189 	if (ret) {
5190 		DRM_ERROR("ci_enable_smc_cac failed\n");
5191 		return ret;
5192 	}
5193 	ret = ci_enable_power_containment(rdev, true);
5194 	if (ret) {
5195 		DRM_ERROR("ci_enable_power_containment failed\n");
5196 		return ret;
5197 	}
5198 
5199 	ret = ci_power_control_set_level(rdev);
5200 	if (ret) {
5201 		DRM_ERROR("ci_power_control_set_level failed\n");
5202 		return ret;
5203 	}
5204 
5205 	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5206 
5207 	ret = ci_enable_thermal_based_sclk_dpm(rdev, true);
5208 	if (ret) {
5209 		DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
5210 		return ret;
5211 	}
5212 
5213 	ci_thermal_start_thermal_controller(rdev);
5214 
5215 	ci_update_current_ps(rdev, boot_ps);
5216 
5217 	return 0;
5218 }
5219 
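/* Re-arm the thermal alert around programming the default trip points. */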
5220 static int ci_set_temperature_range(struct radeon_device *rdev)
5221 {
5222 	int ret;
5223 
5224 	ret = ci_thermal_enable_alert(rdev, false);
5225 	if (ret)
5226 		return ret;
5227 	ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
5228 	if (ret)
5229 		return ret;
5230 	ret = ci_thermal_enable_alert(rdev, true);
5231 	if (ret)
5232 		return ret;
5233 
5234 	return ret;
5235 }
5236 
5237 int ci_dpm_late_enable(struct radeon_device *rdev)
5238 {
5239 	int ret;
5240 
5241 	ret = ci_set_temperature_range(rdev);
5242 	if (ret)
5243 		return ret;
5244 
5245 	ci_dpm_powergate_uvd(rdev, true);
5246 
5247 	return 0;
5248 }
5249 
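/*
 * Tear DPM down in roughly the reverse order of ci_dpm_enable() and fall
 * back to the boot power state.
 */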
5250 void ci_dpm_disable(struct radeon_device *rdev)
5251 {
5252 	struct ci_power_info *pi = ci_get_pi(rdev);
5253 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
5254 
5255 	ci_dpm_powergate_uvd(rdev, false);
5256 
5257 	if (!ci_is_smc_running(rdev))
5258 		return;
5259 
5260 	ci_thermal_stop_thermal_controller(rdev);
5261 
5262 	if (pi->thermal_protection)
5263 		ci_enable_thermal_protection(rdev, false);
5264 	ci_enable_power_containment(rdev, false);
5265 	ci_enable_smc_cac(rdev, false);
5266 	ci_enable_didt(rdev, false);
5267 	ci_enable_spread_spectrum(rdev, false);
5268 	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
5269 	ci_stop_dpm(rdev);
5270 	ci_enable_ds_master_switch(rdev, false);
5271 	ci_enable_ulv(rdev, false);
5272 	ci_clear_vc(rdev);
5273 	ci_reset_to_default(rdev);
5274 	ci_dpm_stop_smc(rdev);
5275 	ci_force_switch_to_arb_f0(rdev);
5276 	ci_enable_thermal_based_sclk_dpm(rdev, false);
5277 
5278 	ci_update_current_ps(rdev, boot_ps);
5279 }
5280 
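/*
 * Switch to the requested power state: freeze sclk/mclk DPM, upload the
 * new DPM level tables and enable mask, then unfreeze.  PCIe link speed
 * changes are requested before and confirmed after the switch when
 * pcie_performance_request is supported.
 */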
5281 int ci_dpm_set_power_state(struct radeon_device *rdev)
5282 {
5283 	struct ci_power_info *pi = ci_get_pi(rdev);
5284 	struct radeon_ps *new_ps = &pi->requested_rps;
5285 	struct radeon_ps *old_ps = &pi->current_rps;
5286 	int ret;
5287 
5288 	ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
5289 	if (pi->pcie_performance_request)
5290 		ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
5291 	ret = ci_freeze_sclk_mclk_dpm(rdev);
5292 	if (ret) {
5293 		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
5294 		return ret;
5295 	}
5296 	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
5297 	if (ret) {
5298 		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
5299 		return ret;
5300 	}
5301 	ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
5302 	if (ret) {
5303 		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
5304 		return ret;
5305 	}
5306 
5307 	ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
5308 	if (ret) {
5309 		DRM_ERROR("ci_update_vce_dpm failed\n");
5310 		return ret;
5311 	}
5312 
5313 	ret = ci_update_sclk_t(rdev);
5314 	if (ret) {
5315 		DRM_ERROR("ci_update_sclk_t failed\n");
5316 		return ret;
5317 	}
5318 	if (pi->caps_dynamic_ac_timing) {
5319 		ret = ci_update_and_upload_mc_reg_table(rdev);
5320 		if (ret) {
5321 			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
5322 			return ret;
5323 		}
5324 	}
5325 	ret = ci_program_memory_timing_parameters(rdev);
5326 	if (ret) {
5327 		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
5328 		return ret;
5329 	}
5330 	ret = ci_unfreeze_sclk_mclk_dpm(rdev);
5331 	if (ret) {
5332 		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
5333 		return ret;
5334 	}
5335 	ret = ci_upload_dpm_level_enable_mask(rdev);
5336 	if (ret) {
5337 		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
5338 		return ret;
5339 	}
5340 	if (pi->pcie_performance_request)
5341 		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
5342 
5343 	return 0;
5344 }
5345 
5346 void ci_dpm_reset_asic(struct radeon_device *rdev)
5347 {
5348 	ci_set_boot_state(rdev);
5349 }
5350 
5351 void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
5352 {
5353 	ci_program_display_gap(rdev);
5354 }
5355 
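/* Overlay unions for the version-dependent ATOM PowerPlay table layouts. */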
5356 union power_info {
5357 	struct _ATOM_POWERPLAY_INFO info;
5358 	struct _ATOM_POWERPLAY_INFO_V2 info_2;
5359 	struct _ATOM_POWERPLAY_INFO_V3 info_3;
5360 	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
5361 	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
5362 	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
5363 };
5364 
5365 union pplib_clock_info {
5366 	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
5367 	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
5368 	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
5369 	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
5370 	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
5371 	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
5372 };
5373 
5374 union pplib_power_state {
5375 	struct _ATOM_PPLIB_STATE v1;
5376 	struct _ATOM_PPLIB_STATE_V2 v2;
5377 };
5378 
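/* Pull the classification flags and UVD clocks out of a non-clock entry. */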
5379 static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
5380 					  struct radeon_ps *rps,
5381 					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
5382 					  u8 table_rev)
5383 {
5384 	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
5385 	rps->class = le16_to_cpu(non_clock_info->usClassification);
5386 	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
5387 
5388 	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
5389 		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
5390 		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
5391 	} else {
5392 		rps->vclk = 0;
5393 		rps->dclk = 0;
5394 	}
5395 
5396 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
5397 		rdev->pm.dpm.boot_ps = rps;
5398 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
5399 		rdev->pm.dpm.uvd_ps = rps;
5400 }
5401 
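/*
 * Fill performance level 'index' from a pplib clock entry.  Clocks are
 * stored split: a 16-bit low word plus an 8-bit high byte, i.e.
 * sclk = usEngineClockLow | (ucEngineClockHigh << 16).
 */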
5402 static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
5403 				      struct radeon_ps *rps, int index,
5404 				      union pplib_clock_info *clock_info)
5405 {
5406 	struct ci_power_info *pi = ci_get_pi(rdev);
5407 	struct ci_ps *ps = ci_get_ps(rps);
5408 	struct ci_pl *pl = &ps->performance_levels[index];
5409 
5410 	ps->performance_level_count = index + 1;
5411 
5412 	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5413 	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
5414 	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5415 	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5416 
5417 	pl->pcie_gen = r600_get_pcie_gen_support(rdev,
5418 						 pi->sys_pcie_mask,
5419 						 pi->vbios_boot_state.pcie_gen_bootup_value,
5420 						 clock_info->ci.ucPCIEGen);
5421 	pl->pcie_lane = r600_get_pcie_lane_support(rdev,
5422 						   pi->vbios_boot_state.pcie_lane_bootup_value,
5423 						   le16_to_cpu(clock_info->ci.usPCIELane));
5424 
5425 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
5426 		pi->acpi_pcie_gen = pl->pcie_gen;
5428 
5429 	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
5430 		pi->ulv.supported = true;
5431 		pi->ulv.pl = *pl;
5432 		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
5433 	}
5434 
5435 	/* patch up boot state */
5436 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
5437 		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
5438 		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
5439 		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
5440 		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
5441 	}
5442 
5443 	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
5444 	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
5445 		pi->use_pcie_powersaving_levels = true;
5446 		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
5447 			pi->pcie_gen_powersaving.max = pl->pcie_gen;
5448 		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
5449 			pi->pcie_gen_powersaving.min = pl->pcie_gen;
5450 		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
5451 			pi->pcie_lane_powersaving.max = pl->pcie_lane;
5452 		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
5453 			pi->pcie_lane_powersaving.min = pl->pcie_lane;
5454 		break;
5455 	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
5456 		pi->use_pcie_performance_levels = true;
5457 		if (pi->pcie_gen_performance.max < pl->pcie_gen)
5458 			pi->pcie_gen_performance.max = pl->pcie_gen;
5459 		if (pi->pcie_gen_performance.min > pl->pcie_gen)
5460 			pi->pcie_gen_performance.min = pl->pcie_gen;
5461 		if (pi->pcie_lane_performance.max < pl->pcie_lane)
5462 			pi->pcie_lane_performance.max = pl->pcie_lane;
5463 		if (pi->pcie_lane_performance.min > pl->pcie_lane)
5464 			pi->pcie_lane_performance.min = pl->pcie_lane;
5465 		break;
5466 	default:
5467 		break;
5468 	}
5469 }
5470 
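/*
 * Walk the ATOM state array and build rdev->pm.dpm.ps[]: one ci_ps per
 * state, with up to CISLANDS_MAX_HARDWARE_POWERLEVELS performance levels
 * each, plus the derived VCE state clocks.
 */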
5471 static int ci_parse_power_table(struct radeon_device *rdev)
5472 {
5473 	struct radeon_mode_info *mode_info = &rdev->mode_info;
5474 	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5475 	union pplib_power_state *power_state;
5476 	int i, j, k, non_clock_array_index, clock_array_index;
5477 	union pplib_clock_info *clock_info;
5478 	struct _StateArray *state_array;
5479 	struct _ClockInfoArray *clock_info_array;
5480 	struct _NonClockInfoArray *non_clock_info_array;
5481 	union power_info *power_info;
5482 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5483 	u16 data_offset;
5484 	u8 frev, crev;
5485 	u8 *power_state_offset;
5486 	struct ci_ps *ps;
5487 
5488 	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
5489 				   &frev, &crev, &data_offset))
5490 		return -EINVAL;
5491 	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5492 
5493 	state_array = (struct _StateArray *)
5494 		(mode_info->atom_context->bios + data_offset +
5495 		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
5496 	clock_info_array = (struct _ClockInfoArray *)
5497 		(mode_info->atom_context->bios + data_offset +
5498 		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5499 	non_clock_info_array = (struct _NonClockInfoArray *)
5500 		(mode_info->atom_context->bios + data_offset +
5501 		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5502 
5503 	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
5504 				  sizeof(struct radeon_ps), GFP_KERNEL);
5505 	if (!rdev->pm.dpm.ps)
5506 		return -ENOMEM;
5507 	power_state_offset = (u8 *)state_array->states;
5508 	for (i = 0; i < state_array->ucNumEntries; i++) {
5509 		u8 *idx;
5510 		power_state = (union pplib_power_state *)power_state_offset;
5511 		non_clock_array_index = power_state->v2.nonClockInfoIndex;
5512 		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5513 			&non_clock_info_array->nonClockInfo[non_clock_array_index];
5514 		if (!rdev->pm.power_state[i].clock_info)
5515 			return -EINVAL;
5516 		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
5517 		if (ps == NULL)
5518 			return -ENOMEM;
5519 		rdev->pm.dpm.ps[i].ps_priv = ps;
5520 		/* count as we go so ci_dpm_fini() frees only valid states on
5521 		 * the error paths above; freeing rdev->pm.dpm.ps here as well
		 * would double free it there. */
		rdev->pm.dpm.num_ps = i + 1;
5522 		ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
5523 					      non_clock_info,
5524 					      non_clock_info_array->ucEntrySize);
5525 		k = 0;
5526 		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5527 		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5528 			clock_array_index = idx[j];
5529 			if (clock_array_index >= clock_info_array->ucNumEntries)
5530 				continue;
5531 			if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5532 				break;
5533 			clock_info = (union pplib_clock_info *)
5534 				((u8 *)&clock_info_array->clockInfo[0] +
5535 				 (clock_array_index * clock_info_array->ucEntrySize));
5536 			ci_parse_pplib_clock_info(rdev,
5537 						  &rdev->pm.dpm.ps[i], k,
5538 						  clock_info);
5539 			k++;
5540 		}
5541 		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5542 	}
5543 	rdev->pm.dpm.num_ps = state_array->ucNumEntries;
5544 
5545 	/* fill in the vce power states */
5546 	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
5547 		u32 sclk, mclk;
5548 		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
5549 		clock_info = (union pplib_clock_info *)
5550 			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5551 		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5552 		sclk |= clock_info->ci.ucEngineClockHigh << 16;
5553 		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5554 		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5555 		rdev->pm.dpm.vce_states[i].sclk = sclk;
5556 		rdev->pm.dpm.vce_states[i].mclk = mclk;
5557 	}
5558 
5559 	return 0;
5560 }
5561 
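/*
 * Read the VBIOS bootup voltages and clocks from the ATOM firmware info
 * table; the bootup PCIe gen/lane values are read back from the current
 * hardware state instead.
 */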
5562 static int ci_get_vbios_boot_values(struct radeon_device *rdev,
5563 				    struct ci_vbios_boot_state *boot_state)
5564 {
5565 	struct radeon_mode_info *mode_info = &rdev->mode_info;
5566 	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5567 	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5568 	u8 frev, crev;
5569 	u16 data_offset;
5570 
5571 	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
5572 				   &frev, &crev, &data_offset)) {
5573 		firmware_info =
5574 			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5575 						    data_offset);
5576 		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5577 		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5578 		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5579 		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
5580 		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
5581 		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5582 		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5583 
5584 		return 0;
5585 	}
5586 	return -EINVAL;
5587 }
5588 
5589 void ci_dpm_fini(struct radeon_device *rdev)
5590 {
5591 	int i;
5592 
5593 	for (i = 0; i < rdev->pm.dpm.num_ps; i++)
5594 		kfree(rdev->pm.dpm.ps[i].ps_priv);
5596 	kfree(rdev->pm.dpm.ps);
5597 	kfree(rdev->pm.dpm.priv);
5598 	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5599 	r600_free_extended_power_table(rdev);
5600 }
5601 
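/*
 * One-time setup at driver load: allocate ci_power_info, parse the VBIOS
 * power tables, choose the voltage control method and fill in the default
 * thresholds and thermal limits used by the rest of this file.
 */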
5602 int ci_dpm_init(struct radeon_device *rdev)
5603 {
5604 	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5605 	SMU7_Discrete_DpmTable  *dpm_table;
5606 	struct radeon_gpio_rec gpio;
5607 	u16 data_offset, size;
5608 	u8 frev, crev;
5609 	struct ci_power_info *pi;
5610 	int ret;
5611 	u32 mask;
5612 
5613 	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5614 	if (pi == NULL)
5615 		return -ENOMEM;
5616 	rdev->pm.dpm.priv = pi;
5617 
5618 	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
5619 	if (ret)
5620 		pi->sys_pcie_mask = 0;
5621 	else
5622 		pi->sys_pcie_mask = mask;
5623 	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
5624 
5625 	pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
5626 	pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
5627 	pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
5628 	pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
5629 
5630 	pi->pcie_lane_performance.max = 0;
5631 	pi->pcie_lane_performance.min = 16;
5632 	pi->pcie_lane_powersaving.max = 0;
5633 	pi->pcie_lane_powersaving.min = 16;
5634 
5635 	ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
5636 	if (ret) {
5637 		ci_dpm_fini(rdev);
5638 		return ret;
5639 	}
5640 
5641 	ret = r600_get_platform_caps(rdev);
5642 	if (ret) {
5643 		ci_dpm_fini(rdev);
5644 		return ret;
5645 	}
5646 
5647 	ret = r600_parse_extended_power_table(rdev);
5648 	if (ret) {
5649 		ci_dpm_fini(rdev);
5650 		return ret;
5651 	}
5652 
5653 	ret = ci_parse_power_table(rdev);
5654 	if (ret) {
5655 		ci_dpm_fini(rdev);
5656 		return ret;
5657 	}
5658 
5659 	pi->dll_default_on = false;
5660 	pi->sram_end = SMC_RAM_END;
5661 
5662 	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5663 	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5664 	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5665 	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5666 	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5667 	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5668 	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5669 	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5670 
5671 	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5672 
5673 	pi->sclk_dpm_key_disabled = 0;
5674 	pi->mclk_dpm_key_disabled = 0;
5675 	pi->pcie_dpm_key_disabled = 0;
5676 	pi->thermal_sclk_dpm_enabled = 0;
5677 
5678 	/* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
5679 	if ((rdev->pdev->device == 0x6658) &&
5680 	    (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) {
5681 		pi->mclk_dpm_key_disabled = 1;
5682 	}
5683 
5684 	pi->caps_sclk_ds = true;
5685 
5686 	pi->mclk_strobe_mode_threshold = 40000;
5687 	pi->mclk_stutter_mode_threshold = 40000;
5688 	pi->mclk_edc_enable_threshold = 40000;
5689 	pi->mclk_edc_wr_enable_threshold = 40000;
5690 
5691 	ci_initialize_powertune_defaults(rdev);
5692 
5693 	pi->caps_fps = false;
5694 
5695 	pi->caps_sclk_throttle_low_notification = false;
5696 
5697 	pi->caps_uvd_dpm = true;
5698 	pi->caps_vce_dpm = true;
5699 
5700 	ci_get_leakage_voltages(rdev);
5701 	ci_patch_dependency_tables_with_leakage(rdev);
5702 	ret = ci_set_private_data_variables_based_on_pptable(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}
5703 
5704 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5705 		kcalloc(4, sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
5706 	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5707 		ci_dpm_fini(rdev);
5708 		return -ENOMEM;
5709 	}
5710 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5711 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5712 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5713 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5714 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5715 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5716 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5717 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5718 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5719 
5720 	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5721 	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5722 	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5723 
5724 	rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5725 	rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5726 	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5727 	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5728 
5729 	if (rdev->family == CHIP_HAWAII) {
5730 		pi->thermal_temp_setting.temperature_low = 94500;
5731 		pi->thermal_temp_setting.temperature_high = 95000;
5732 		pi->thermal_temp_setting.temperature_shutdown = 104000;
5733 	} else {
5734 		pi->thermal_temp_setting.temperature_low = 99500;
5735 		pi->thermal_temp_setting.temperature_high = 100000;
5736 		pi->thermal_temp_setting.temperature_shutdown = 104000;
5737 	}
5738 
5739 	pi->uvd_enabled = false;
5740 
5741 	dpm_table = &pi->smc_state_table;
5742 
5743 	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID);
5744 	if (gpio.valid) {
5745 		dpm_table->VRHotGpio = gpio.shift;
5746 		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5747 	} else {
5748 		dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
5749 		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5750 	}
5751 
5752 	gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID);
5753 	if (gpio.valid) {
5754 		dpm_table->AcDcGpio = gpio.shift;
5755 		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5756 	} else {
5757 		dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
5758 		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5759 	}
5760 
5761 	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID);
5762 	if (gpio.valid) {
5763 		u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL);
5764 
5765 		switch (gpio.shift) {
5766 		case 0:
5767 			tmp &= ~GNB_SLOW_MODE_MASK;
5768 			tmp |= GNB_SLOW_MODE(1);
5769 			break;
5770 		case 1:
5771 			tmp &= ~GNB_SLOW_MODE_MASK;
5772 			tmp |= GNB_SLOW_MODE(2);
5773 			break;
5774 		case 2:
5775 			tmp |= GNB_SLOW;
5776 			break;
5777 		case 3:
5778 			tmp |= FORCE_NB_PS1;
5779 			break;
5780 		case 4:
5781 			tmp |= DPM_ENABLED;
5782 			break;
5783 		default:
5784 			DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
5785 			break;
5786 		}
5787 		WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
5788 	}
5789 
5790 	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5791 	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5792 	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5793 	if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
5794 		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5795 	else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
5796 		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5797 
5798 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
5799 		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
5800 			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5801 		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
5802 			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5803 		else
5804 			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
5805 	}
5806 
5807 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
5808 		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
5809 			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5810 		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
5811 			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5812 		else
5813 			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
5814 	}
5815 
5816 	pi->vddc_phase_shed_control = true;
5817 
5818 #if defined(CONFIG_ACPI)
5819 	pi->pcie_performance_request =
5820 		radeon_acpi_is_pcie_performance_request_supported(rdev);
5821 #else
5822 	pi->pcie_performance_request = false;
5823 #endif
5824 
5825 	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
5826 				   &frev, &crev, &data_offset)) {
5827 		pi->caps_sclk_ss_support = true;
5828 		pi->caps_mclk_ss_support = true;
5829 		pi->dynamic_ss = true;
5830 	} else {
5831 		pi->caps_sclk_ss_support = false;
5832 		pi->caps_mclk_ss_support = false;
5833 		pi->dynamic_ss = true;
5834 	}
5835 
5836 	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
5837 		pi->thermal_protection = true;
5838 	else
5839 		pi->thermal_protection = false;
5840 
5841 	pi->caps_dynamic_ac_timing = true;
5842 
5843 	pi->uvd_power_gated = false;
5844 
5845 	/* make sure dc limits are valid */
5846 	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
5847 	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
5848 		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
5849 			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
5850 
5851 	pi->fan_ctrl_is_in_default_mode = true;
5852 
5853 	return 0;
5854 }
5855 
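/* debugfs hook: report UVD/VCE state and the SMC-averaged sclk/mclk. */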
5856 void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
5857 						    struct seq_file *m)
5858 {
5859 	struct ci_power_info *pi = ci_get_pi(rdev);
5860 	struct radeon_ps *rps = &pi->current_rps;
5861 	u32 sclk = ci_get_average_sclk_freq(rdev);
5862 	u32 mclk = ci_get_average_mclk_freq(rdev);
5863 
5864 	seq_printf(m, "uvd    %sabled\n", pi->uvd_enabled ? "en" : "dis");
5865 	seq_printf(m, "vce    %sabled\n", rps->vce_active ? "en" : "dis");
5866 	seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
5867 		   sclk, mclk);
5868 }
5869 
5870 void ci_dpm_print_power_state(struct radeon_device *rdev,
5871 			      struct radeon_ps *rps)
5872 {
5873 	struct ci_ps *ps = ci_get_ps(rps);
5874 	struct ci_pl *pl;
5875 	int i;
5876 
5877 	r600_dpm_print_class_info(rps->class, rps->class2);
5878 	r600_dpm_print_cap_info(rps->caps);
5879 	printk("\tuvd    vclk: %u dclk: %u\n", rps->vclk, rps->dclk);
5880 	for (i = 0; i < ps->performance_level_count; i++) {
5881 		pl = &ps->performance_levels[i];
5882 		printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
5883 		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
5884 	}
5885 	r600_dpm_print_ps_status(rdev, rps);
5886 }
5887 
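/* Report the lowest or highest engine clock in the requested state. */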
5888 u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
5889 {
5890 	struct ci_power_info *pi = ci_get_pi(rdev);
5891 	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5892 
5893 	if (low)
5894 		return requested_state->performance_levels[0].sclk;
5895 	else
5896 		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
5897 }
5898 
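/* Report the lowest or highest memory clock in the requested state. */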
5899 u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
5900 {
5901 	struct ci_power_info *pi = ci_get_pi(rdev);
5902 	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5903 
5904 	if (low)
5905 		return requested_state->performance_levels[0].mclk;
5906 	else
5907 		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
5908 }
5909