1 /*
2 * Copyright 2021 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include "amdgpu.h"
24 #include "amdgpu_i2c.h"
25 #include "amdgpu_atombios.h"
26 #include "atom.h"
27 #include "amd_pcie.h"
28 #include "legacy_dpm.h"
29 #include "amdgpu_dpm_internal.h"
30 #include "amdgpu_display.h"
31
/*
 * Thin wrappers around the powerplay function table (pp_funcs).  These do
 * NOT check the callback pointer for NULL -- callers are expected to verify
 * the relevant pp_funcs member first (see amdgpu_pm_print_power_states()).
 */
#define amdgpu_dpm_pre_set_power_state(adev) \
		((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))

#define amdgpu_dpm_post_set_power_state(adev) \
		((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))

#define amdgpu_dpm_display_configuration_changed(adev) \
		((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle))

#define amdgpu_dpm_print_power_state(adev, ps) \
		((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps)))

#define amdgpu_dpm_vblank_too_short(adev) \
		((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle))

#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
		((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
49
/* Dump the power state classification words to the drm debug log. */
void amdgpu_dpm_dbg_print_class_info(struct amdgpu_device *adev, u32 class, u32 class2)
{
	u32 ui_class = class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
	const char *ui_name;

	/* Translate the UI classification field into a printable name. */
	if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		ui_name = "battery";
	else if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED)
		ui_name = "balanced";
	else if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE)
		ui_name = "performance";
	else
		ui_name = "none";
	drm_dbg(adev_to_drm(adev), "\tui class: %s\n", ui_name);

	/* Then the internal classification flags from both class words. */
	if ((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) || class2)
		drm_dbg(adev_to_drm(adev), "\tinternal class: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
			(class & ATOM_PPLIB_CLASSIFICATION_BOOT) ? " boot" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_THERMAL) ? " thermal" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) ? " limited_pwr" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_REST) ? " rest" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_FORCED) ? " forced" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) ? " 3d_perf" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) ? " ovrdrv" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) ? " uvd" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_3DLOW) ? " 3d_low" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_ACPI) ? " acpi" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) ? " uvd_hd2" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) ? " uvd_hd" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) ? " uvd_sd" : "",
			(class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) ? " limited_pwr2" : "",
			(class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) ? " ulv" : "",
			(class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) ? " uvd_mvc" : "");
	else
		drm_dbg(adev_to_drm(adev), "\tinternal class: none\n");
}
92
/* Dump the power state platform capability flags to the drm debug log. */
void amdgpu_dpm_dbg_print_cap_info(struct amdgpu_device *adev, u32 caps)
{
	const char *single_disp = (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) ? " single_disp" : "";
	const char *video = (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) ? " video" : "";
	const char *no_dc = (caps & ATOM_PPLIB_DISALLOW_ON_DC) ? " no_dc" : "";

	drm_dbg(adev_to_drm(adev), "\tcaps: %s%s%s\n", single_disp, video, no_dc);
}
100
amdgpu_dpm_dbg_print_ps_status(struct amdgpu_device * adev,struct amdgpu_ps * rps)101 void amdgpu_dpm_dbg_print_ps_status(struct amdgpu_device *adev,
102 struct amdgpu_ps *rps)
103 {
104 drm_dbg(adev_to_drm(adev), "\tstatus:%s%s%s\n",
105 rps == adev->pm.dpm.current_ps ? " c" : "",
106 rps == adev->pm.dpm.requested_ps ? " r" : "",
107 rps == adev->pm.dpm.boot_ps ? " b" : "");
108 }
109
amdgpu_pm_print_power_states(struct amdgpu_device * adev)110 void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
111 {
112 int i;
113
114 if (adev->powerplay.pp_funcs->print_power_state == NULL)
115 return;
116
117 for (i = 0; i < adev->pm.dpm.num_ps; i++)
118 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
119
120 }
121
/*
 * Overlay of every PowerPlay info table revision that may appear in the
 * VBIOS; the table found at the PowerPlayInfo offset is interpreted
 * through whichever member matches its size/revision.
 */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};
132
amdgpu_get_platform_caps(struct amdgpu_device * adev)133 int amdgpu_get_platform_caps(struct amdgpu_device *adev)
134 {
135 struct amdgpu_mode_info *mode_info = &adev->mode_info;
136 union power_info *power_info;
137 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
138 u16 data_offset;
139 u8 frev, crev;
140
141 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
142 &frev, &crev, &data_offset))
143 return -EINVAL;
144 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
145
146 adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
147 adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
148 adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
149
150 return 0;
151 }
152
/*
 * Overlay of the fan table revisions; ucFanTableFormat selects which
 * member is actually valid (>= 2 adds usTMax, >= 3 adds control mode
 * and PWM sensitivity fields).
 */
union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};
158
/**
 * amdgpu_parse_clk_voltage_dep_table - copy an ATOM clock/voltage table
 * @amdgpu_table: destination driver-native table; its entries array is
 *                allocated here and owned by the caller (freed with kfree(),
 *                see amdgpu_free_extended_power_table())
 * @atom_table: packed little-endian source table from the VBIOS
 *
 * Converts each ATOM record (16-bit clock low word + 8-bit high byte,
 * 16-bit voltage) into an amdgpu_clock_voltage_dependency_entry.
 *
 * Return: 0 on success, -ENOMEM if the entry array cannot be allocated.
 */
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
	int i;

	/*
	 * kcalloc() is the idiomatic kernel way to allocate a zeroed array:
	 * it performs the count * size multiplication with overflow checking
	 * instead of open-coding it into a u32.
	 */
	amdgpu_table->entries = kcalloc(atom_table->ucNumEntries,
					sizeof(struct amdgpu_clock_voltage_dependency_entry),
					GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		/* Clock is split across a low u16 and a high u8 in the BIOS. */
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		/* Advance by the packed record size, not C pointer arithmetic. */
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}
183
/*
 * sizeof(ATOM_PPLIB_EXTENDEDHEADER) for each header revision.  Each newer
 * revision appends one u16 offset field; comparing the header's usSize
 * against these tells us which optional sub-table offsets are present.
 */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
193
/**
 * amdgpu_parse_extended_power_table - parse the extended PowerPlay tables
 * @adev: amdgpu device pointer
 *
 * Walks the ATOM PowerPlay data table in the VBIOS and fills adev->pm.dpm:
 * fan parameters, clock/voltage dependency tables, CAC data, and the
 * optional extended-header sub-tables (VCE, UVD, SAMU, PPM, ACP,
 * PowerTune, SCLK/VDDGFX).  Which sections exist is determined by the
 * table's usTableSize and the extended header's usSize revision checks.
 *
 * Returns 0 on success, -EINVAL if the table header cannot be located,
 * or -ENOMEM on allocation failure.  On error, tables allocated so far
 * are left in place; NOTE(review): callers are presumably expected to
 * clean up with amdgpu_free_extended_power_table() -- confirm the error
 * paths of all callers do so.
 */
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table (v3+ tables only; all offsets are relative to data_offset) */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			/* usTMax only exists from fan table format 2 on; fall back to a default */
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			/* format 3 adds control mode and PWM output sensitivity */
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables (v4+ tables) */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			/* only the first limit entry is consumed */
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc_objs(struct amdgpu_phase_shedding_limits_entry,
					     psl->ucNumEntries);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
				return -ENOMEM;

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				/* advance by the packed record size */
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data (v5+ tables) */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		/* a non-zero overdrive TDP limit implies power control support */
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries)
				return -ENOMEM;
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				/* with EVV the record holds three vddc values instead of vddc+leakage */
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables: optional sub-tables reached through the extended header */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		/*
		 * NOTE(review): the "+ 1" adjustments below appear to skip a
		 * leading revision/count byte in each sub-table -- assumed from
		 * the offset arithmetic; confirm against the atombios layout.
		 */
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
				return -ENOMEM;
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				/* each limit record indexes into the shared clock-info array */
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			/* clamp the VCE state count to the fixed-size vce_states array */
			adev->pm.dpm.num_of_vce_states =
					states->numEntries > AMD_MAX_VCE_LEVELS ?
					AMD_MAX_VCE_LEVELS : states->numEntries;
			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				/* low 6 bits: clock info index; top 2 bits: pstate */
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
				return -ENOMEM;
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
				return -ENOMEM;
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc_obj(struct amdgpu_ppm_table);
			if (!adev->pm.dpm.dyn_state.ppm_table)
				return -ENOMEM;
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
				return -ENOMEM;
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			/* first byte of the powertune table is its revision */
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc_obj(struct amdgpu_cac_tdp_table);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table)
				return -ENOMEM;
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				/* rev 0 has no delivery limit field; use a default */
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
		    ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret)
				return ret;
		}
	}

	return 0;
}
604
amdgpu_free_extended_power_table(struct amdgpu_device * adev)605 void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
606 {
607 struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
608
609 kfree(dyn_state->vddc_dependency_on_sclk.entries);
610 kfree(dyn_state->vddci_dependency_on_mclk.entries);
611 kfree(dyn_state->vddc_dependency_on_mclk.entries);
612 kfree(dyn_state->mvdd_dependency_on_mclk.entries);
613 kfree(dyn_state->cac_leakage_table.entries);
614 kfree(dyn_state->phase_shedding_limits_table.entries);
615 kfree(dyn_state->ppm_table);
616 kfree(dyn_state->cac_tdp_table);
617 kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
618 kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
619 kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
620 kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
621 kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
622 }
623
/*
 * Human-readable controller names indexed by the
 * ATOM_PP_THERMALCONTROLLER_* type id from the VBIOS thermal controller
 * descriptor; bounds-checked against ARRAY_SIZE() before use.
 */
static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};
646
amdgpu_add_thermal_controller(struct amdgpu_device * adev)647 void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
648 {
649 struct amdgpu_mode_info *mode_info = &adev->mode_info;
650 ATOM_PPLIB_POWERPLAYTABLE *power_table;
651 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
652 ATOM_PPLIB_THERMALCONTROLLER *controller;
653 struct amdgpu_i2c_bus_rec i2c_bus;
654 u16 data_offset;
655 u8 frev, crev;
656
657 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
658 &frev, &crev, &data_offset))
659 return;
660 power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
661 (mode_info->atom_context->bios + data_offset);
662 controller = &power_table->sThermalController;
663
664 /* add the i2c bus for thermal/fan chip */
665 if (controller->ucType > 0) {
666 if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
667 adev->pm.no_fan = true;
668 adev->pm.fan_pulses_per_revolution =
669 controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
670 if (adev->pm.fan_pulses_per_revolution) {
671 adev->pm.fan_min_rpm = controller->ucFanMinRPM;
672 adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
673 }
674 if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
675 drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
676 (controller->ucFanParameters &
677 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
678 adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
679 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
680 drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
681 (controller->ucFanParameters &
682 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
683 adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
684 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
685 drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
686 (controller->ucFanParameters &
687 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
688 adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
689 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
690 drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
691 (controller->ucFanParameters &
692 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
693 adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
694 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
695 drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
696 (controller->ucFanParameters &
697 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
698 adev->pm.int_thermal_type = THERMAL_TYPE_NI;
699 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
700 drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
701 (controller->ucFanParameters &
702 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
703 adev->pm.int_thermal_type = THERMAL_TYPE_SI;
704 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
705 drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
706 (controller->ucFanParameters &
707 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
708 adev->pm.int_thermal_type = THERMAL_TYPE_CI;
709 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
710 drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
711 (controller->ucFanParameters &
712 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
713 adev->pm.int_thermal_type = THERMAL_TYPE_KV;
714 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
715 drm_info(adev_to_drm(adev), "External GPIO thermal controller %s fan control\n",
716 (controller->ucFanParameters &
717 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
718 adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
719 } else if (controller->ucType ==
720 ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
721 drm_info(adev_to_drm(adev), "ADT7473 with internal thermal controller %s fan control\n",
722 (controller->ucFanParameters &
723 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
724 adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
725 } else if (controller->ucType ==
726 ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
727 drm_info(adev_to_drm(adev), "EMC2103 with internal thermal controller %s fan control\n",
728 (controller->ucFanParameters &
729 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
730 adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
731 } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
732 drm_info(adev_to_drm(adev), "Possible %s thermal controller at 0x%02x %s fan control\n",
733 pp_lib_thermal_controller_names[controller->ucType],
734 controller->ucI2cAddress >> 1,
735 (controller->ucFanParameters &
736 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
737 adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
738 i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
739 adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
740 if (adev->pm.i2c_bus) {
741 struct i2c_board_info info = { };
742 const char *name = pp_lib_thermal_controller_names[controller->ucType];
743 info.addr = controller->ucI2cAddress >> 1;
744 strscpy(info.type, name, sizeof(info.type));
745 i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
746 }
747 } else {
748 drm_info(adev_to_drm(adev), "Unknown thermal controller type %d at 0x%02x %s fan control\n",
749 controller->ucType,
750 controller->ucI2cAddress >> 1,
751 (controller->ucFanParameters &
752 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
753 }
754 }
755 }
756
/**
 * amdgpu_get_vce_clock_state - look up a VCE clock state by index
 * @handle: amdgpu_device pointer (opaque powerplay handle)
 * @idx: index into the VCE state table
 *
 * Returns the VCE state at @idx, or NULL when @idx is outside the
 * populated portion of the table.
 */
struct amd_vce_state *amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
	struct amdgpu_device *adev = handle;

	/* Reject out-of-range indices up front. */
	if (idx >= adev->pm.dpm.num_of_vce_states)
		return NULL;

	return &adev->pm.dpm.vce_states[idx];
}
766
/*
 * amdgpu_dpm_pick_power_state - select the best power state for a request
 * @adev: amdgpu device pointer
 * @dpm_state: requested power state class
 *
 * Walks the ATOM powerplay state table and returns the first state whose
 * classification flags match @dpm_state, honoring single-display-only
 * restrictions for the user-visible (battery/performance) classes.  When
 * no state matches, the request is downgraded to a less specific class
 * (e.g. UVD SD -> UVD HD -> generic UVD -> performance) and the table is
 * searched again via restart_search.
 *
 * Returns the chosen state, or NULL if nothing in the table matches even
 * after all fallbacks.
 */
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = adev->pm.pm_display_cfg.num_display < 2;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separare 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				/* single-display-only states need a single active display */
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			/* use the cached generic UVD state if one was found at init */
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			/* boot state is always present; no table search needed */
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
890
/*
 * amdgpu_dpm_change_power_state_locked - switch the GPU to the target state
 * @adev: amdgpu device pointer
 *
 * Resolves the effective dpm state (applying the user's requested state
 * unless a thermal or UVD override is active), picks a matching power
 * state from the table and programs it through the pp_funcs callbacks in
 * the required order: display config update, pre_set, equality check,
 * set, post_set, and finally the forced performance level.
 *
 * NOTE(review): the "_locked" suffix and the caller in
 * amdgpu_dpm_thermal_work_handler suggest adev->pm.mutex must be held —
 * confirm against other callers.
 *
 * Returns 0 on success (including the no-op case where the requested
 * state equals the current one), -EINVAL if no state matches, or the
 * error from pre_set_power_state.
 */
static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal = false;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return 0;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return -EINVAL;

	/* amdgpu_dpm is the dpm module parameter; presumably 1 enables
	 * verbose state dumps — confirm against the parameter docs */
	if (amdgpu_dpm == 1 && pp_funcs->print_power_state) {
		drm_dbg(adev_to_drm(adev), "switching from power state\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		drm_dbg(adev_to_drm(adev), "switching to power state\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;
	if (pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return ret;

	/* skip the hardware reprogramming if old and new states are equal */
	if (pp_funcs->check_state_equal) {
		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
			equal = false;
	}

	if (equal)
		return 0;

	if (pp_funcs->set_power_state)
		pp_funcs->set_power_state(adev->powerplay.pp_handle);

	amdgpu_dpm_post_set_power_state(adev);

	if (pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			pp_funcs->force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			pp_funcs->force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}

	return 0;
}
961
amdgpu_legacy_dpm_compute_clocks(void * handle)962 void amdgpu_legacy_dpm_compute_clocks(void *handle)
963 {
964 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
965
966 if (!adev->dc_enabled)
967 amdgpu_dpm_get_display_cfg(adev);
968
969 amdgpu_dpm_change_power_state_locked(adev);
970 }
971
/*
 * amdgpu_dpm_thermal_work_handler - deferred thermal event handling
 * @work: embedded in adev->pm.dpm.thermal.work
 *
 * Reads the current GPU temperature and decides whether to enter the
 * internal thermal power state or restore the user-selected one, then
 * recomputes the clocks.  All work is done under adev->pm.mutex.
 */
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
	int temp, size = sizeof(temp);

	mutex_lock(&adev->pm.mutex);

	/* nothing to do if dpm never came up (or was torn down) */
	if (!adev->pm.dpm_enabled) {
		mutex_unlock(&adev->pm.mutex);
		return;
	}
	if (!pp_funcs->read_sensor(adev->powerplay.pp_handle,
				   AMDGPU_PP_SENSOR_GPU_TEMP,
				   (void *)&temp,
				   &size)) {
		/* sensor read succeeded: stay thermal only while still hot */
		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		/* sensor read failed: fall back to the interrupt direction */
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}

	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;

	adev->pm.dpm.state = dpm_state;

	amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}
1011