/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))

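/**
 * amdgpu_dpm_get_sclk - query the graphics (shader) clock
 * @adev: amdgpu_device pointer
 * @low: query the low end of the clock range rather than the high end
 *
 * Wraps the backend's get_sclk callback under adev->pm.mutex. Returns the
 * clock reported by the backend, or 0 if the callback is not implemented.
 */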
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

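/**
 * amdgpu_dpm_set_powergating_by_smu - gate or ungate an IP block via the SMU
 * @adev: amdgpu_device pointer
 * @block_type: AMD_IP_BLOCK_TYPE_* identifier of the IP block
 * @gate: true to power gate the block, false to ungate it
 * @inst: instance number; currently only honored for VCN
 *
 * The request is skipped if the block is already in the target state, except
 * for VCN with more than one instance. On success the cached state in
 * adev->pm.pwr_state is updated.
 */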
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
				      uint32_t block_type,
				      bool gate,
				      int inst)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
	bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
	    (!is_vcn || adev->vcn.num_vcn_inst == 1)) {
		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
			block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
	case AMD_IP_BLOCK_TYPE_VPE:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate, 0));
		break;
	case AMD_IP_BLOCK_TYPE_VCN:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate, inst));
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_gfx_power_up_by_imu(smu);
	mutex_unlock(&adev->pm.mutex);

	msleep(10);

	return ret;
}

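/**
 * amdgpu_dpm_baco_enter - enter BACO (Bus Active, Chip Off)
 * @adev: amdgpu_device pointer
 *
 * Returns -ENOENT if the backend does not implement set_asic_baco_state,
 * otherwise the backend's return code.
 */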
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

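/**
 * amdgpu_dpm_baco_exit - exit BACO (Bus Active, Chip Off)
 * @adev: amdgpu_device pointer
 *
 * Counterpart to amdgpu_dpm_baco_enter(). Returns -ENOENT if the backend
 * does not implement set_asic_baco_state.
 */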
int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

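/**
 * amdgpu_dpm_set_mp1_state - notify the SMU of an MP1 state change
 * @adev: amdgpu_device pointer
 * @mp1_state: target PP_MP1_STATE_* state
 *
 * PP_MP1_STATE_FLR on an SR-IOV VF only marks DPM as disabled, since the VF
 * has lost access to the SMU; all other states are forwarded to the backend.
 */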
int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (mp1_state == PP_MP1_STATE_FLR) {
		/* VF lost access to SMU */
		if (amdgpu_sriov_vf(adev))
			adev->pm.dpm_enabled = false;
	} else if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->notify_rlc_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->notify_rlc_state(
				adev->powerplay.pp_handle,
				en);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

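/**
 * amdgpu_dpm_is_baco_supported - query BACO support
 * @adev: amdgpu_device pointer
 *
 * Returns nonzero if the backend reports BACO capability, 0 otherwise.
 * Always reports 0 while in S3 suspend; see the workaround note below.
 */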
int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return 0;
	/* Don't use baco for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices. Needs more investigation.
	 */
	if (adev->in_s3)
		return 0;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

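/**
 * amdgpu_dpm_baco_reset - reset the ASIC by cycling through BACO
 * @adev: amdgpu_device pointer
 *
 * Enters and then exits the BACO state under a single hold of
 * adev->pm.mutex, which amounts to a full ASIC reset.
 */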
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

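/**
 * amdgpu_dpm_switch_power_profile - enable/disable a workload power profile
 * @adev: amdgpu_device pointer
 * @type: PP_SMC_POWER_PROFILE workload type
 * @en: true to enable the profile, false to disable it
 *
 * A no-op (returning 0) on SR-IOV VFs and on backends without a
 * switch_power_profile callback.
 */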
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
				      enum pp_pm_policy p_type, char *buf)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_get_pm_policy_info(smu, p_type, buf);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
			     int policy_level)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_set_pm_policy(smu, policy_type, policy_level);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
		adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
		adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
		adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

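/**
 * amdgpu_dpm_read_sensor - read a power/thermal sensor value
 * @adev: amdgpu_device pointer
 * @sensor: AMD_PP_SENSOR_* identifier
 * @data: buffer receiving the sensor value
 * @size: size of the buffer, updated by the backend
 *
 * Returns -EINVAL on NULL @data/@size or when the backend has no
 * read_sensor callback.
 */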
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

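/**
 * amdgpu_dpm_compute_clocks - re-evaluate clocks for the current state
 * @adev: amdgpu_device pointer
 *
 * Updates display bandwidth requirements and waits for all rings to go idle
 * before asking the backend to recompute its clock selection.
 */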
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, !enable, inst);
	if (ret)
		DRM_ERROR("Dpm %s vcn failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

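/**
 * amdgpu_pm_load_smu_firmware - load the SMU firmware
 * @adev: amdgpu_device pointer
 * @smu_version: optional output for the loaded SMU firmware version
 *
 * Returns 0 without doing anything when the backend has no load_firmware
 * callback, or for APUs on the software SMU path.
 */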
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware ||
	    (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU)))
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_rma_reason(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_reset_sdma(smu, inst_mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

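/**
 * amdgpu_dpm_get_dpm_freq_range - query the supported SCLK range
 * @adev: amdgpu_device pointer
 * @type: clock type; only PP_SCLK is accepted
 * @min: output for the minimum supported frequency
 * @max: output for the maximum supported frequency
 *
 * Only implemented for the software SMU path; returns -EOPNOTSUPP otherwise
 * and -EINVAL for clock types other than PP_SCLK.
 */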
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu,
				      SMU_SCLK,
				      min,
				      max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_entrycount_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/**
 * amdgpu_dpm_gfx_state_change - handle a gfx power state change request
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (!pp_funcs)
		return AMD_DPM_FORCED_LEVEL_AUTO;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

static void amdgpu_dpm_enter_umd_state(struct amdgpu_device *adev)
{
	/* enter UMD Pstate */
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_PG_STATE_UNGATE);
	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_CG_STATE_UNGATE);
}

static void amdgpu_dpm_exit_umd_state(struct amdgpu_device *adev)
{
	/* exit UMD Pstate */
	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_CG_STATE_GATE);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_PG_STATE_GATE);
}

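/**
 * amdgpu_dpm_force_performance_level - force a DPM performance level
 * @adev: amdgpu_device pointer
 * @level: target AMD_DPM_FORCED_LEVEL_* level
 *
 * Handles the transitions into and out of the UMD pstates (ungating or
 * gating GFX clock/power as needed) around the backend call, and unwinds
 * that transition if the backend rejects the new level. Returns -EINVAL
 * while thermal throttling is active.
 */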
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) && (level & profile_mode_mask))
		amdgpu_dpm_enter_umd_state(adev);
	else if ((current_level & profile_mode_mask) &&
		 !(level & profile_mode_mask))
		amdgpu_dpm_exit_umd_state(adev);

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		/* If the new level failed, retain the UMD state as before */
		if (!(current_level & profile_mode_mask) &&
		    (level & profile_mode_mask))
			amdgpu_dpm_exit_umd_state(adev);
		else if ((current_level & profile_mode_mask) &&
			 !(level & profile_mode_mask))
			amdgpu_dpm_enter_umd_state(adev);

		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 char *buf,
				 int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
				  size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pm_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
				       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

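/**
 * amdgpu_dpm_get_power_limit - query a power limit
 * @adev: amdgpu_device pointer
 * @limit: output for the queried limit
 * @pp_limit_level: which limit to query (e.g. current, default or max)
 * @power_type: which power domain the limit applies to
 *
 * Returns -ENODATA when the backend does not implement get_power_limit.
 */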
int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

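/**
 * amdgpu_dpm_is_overdrive_supported - check whether overdrive is available
 * @adev: amdgpu_device pointer
 *
 * On the software SMU path this reflects smu->od_enabled (or APU status);
 * on powerplay it reads hwmgr->od_enabled, except for legacy DPM, whose
 * pp_handle is the adev itself and carries no such field.
 */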
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = adev->powerplay.pp_handle;

		return (smu->od_enabled || smu->is_apu);
	} else {
		struct pp_hwmgr *hwmgr;

		/*
		 * DPM on some legacy ASICs doesn't carry an od_enabled
		 * member, as its pp_handle is cast directly from adev.
		 */
		if (amdgpu_dpm_is_legacy_dpm(adev))
			return false;

		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

		return hwmgr->od_enabled;
	}
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}