1 /*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25 #include "amdgpu.h"
26 #include "amdgpu_atombios.h"
27 #include "amdgpu_i2c.h"
28 #include "amdgpu_dpm.h"
29 #include "atom.h"
30 #include "amd_pcie.h"
31 #include "amdgpu_display.h"
32 #include "hwmgr.h"
33 #include <linux/power_supply.h>
34 #include "amdgpu_smu.h"
35
36 #define amdgpu_dpm_enable_bapm(adev, e) \
37 ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
38
39 #define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))
40
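/**
 * amdgpu_dpm_get_sclk - Query the graphics clock from the PM backend
 * @adev: amdgpu_device pointer
 * @low: select the low end of the supported clock range instead of the high end
 *
 * Returns the clock value reported by the powerplay backend, or 0 if the
 * backend does not implement the callback.
 */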
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
42 {
43 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
44 int ret = 0;
45
46 if (!pp_funcs->get_sclk)
47 return 0;
48
49 mutex_lock(&adev->pm.mutex);
50 ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
51 low);
52 mutex_unlock(&adev->pm.mutex);
53
54 return ret;
55 }
56
int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
58 {
59 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
60 int ret = 0;
61
62 if (!pp_funcs->get_mclk)
63 return 0;
64
65 mutex_lock(&adev->pm.mutex);
66 ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
67 low);
68 mutex_unlock(&adev->pm.mutex);
69
70 return ret;
71 }
72
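/**
 * amdgpu_dpm_set_powergating_by_smu - Gate or ungate an IP block via the SMU
 * @adev: amdgpu_device pointer
 * @block_type: AMD_IP_BLOCK_TYPE_* identifier of the block
 * @gate: true to power gate the block, false to ungate it
 * @inst: instance index (used for VCN)
 *
 * Skips the request if the block is already in the requested power state
 * (except for multi-instance VCN) and records the new state on success.
 */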
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
        uint32_t block_type,
        bool gate,
        int inst)
77 {
78 int ret = 0;
79 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
80 enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
81 bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN;
82
83 if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
84 (!is_vcn || adev->vcn.num_vcn_inst == 1)) {
dev_dbg(adev->dev, "IP block %d already in the target %s state!",
86 block_type, gate ? "gate" : "ungate");
87 return 0;
88 }
89
90 mutex_lock(&adev->pm.mutex);
91
92 switch (block_type) {
93 case AMD_IP_BLOCK_TYPE_UVD:
94 case AMD_IP_BLOCK_TYPE_VCE:
95 case AMD_IP_BLOCK_TYPE_GFX:
96 case AMD_IP_BLOCK_TYPE_SDMA:
97 case AMD_IP_BLOCK_TYPE_JPEG:
98 case AMD_IP_BLOCK_TYPE_GMC:
99 case AMD_IP_BLOCK_TYPE_ACP:
100 case AMD_IP_BLOCK_TYPE_VPE:
101 if (pp_funcs && pp_funcs->set_powergating_by_smu)
102 ret = (pp_funcs->set_powergating_by_smu(
103 (adev)->powerplay.pp_handle, block_type, gate, 0));
104 break;
105 case AMD_IP_BLOCK_TYPE_VCN:
106 if (pp_funcs && pp_funcs->set_powergating_by_smu)
107 ret = (pp_funcs->set_powergating_by_smu(
108 (adev)->powerplay.pp_handle, block_type, gate, inst));
109 break;
110 default:
111 break;
112 }
113
114 if (!ret)
115 atomic_set(&adev->pm.pwr_state[block_type], pwr_state);
116
117 mutex_unlock(&adev->pm.mutex);
118
119 return ret;
120 }
121
int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
123 {
124 struct smu_context *smu = adev->powerplay.pp_handle;
125 int ret = -EOPNOTSUPP;
126
127 mutex_lock(&adev->pm.mutex);
128 ret = smu_set_gfx_power_up_by_imu(smu);
129 mutex_unlock(&adev->pm.mutex);
130
131 msleep(10);
132
133 return ret;
134 }
135
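/**
 * amdgpu_dpm_baco_enter - Put the GPU into the BACO state
 * @adev: amdgpu_device pointer
 *
 * Asks the powerplay backend to enter the BACO (Bus Active, Chip Off) state.
 * Returns 0 on success, -ENOENT if the backend does not support BACO, or an
 * error code from the backend.
 */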
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
137 {
138 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
139 void *pp_handle = adev->powerplay.pp_handle;
140 int ret = 0;
141
142 if (!pp_funcs || !pp_funcs->set_asic_baco_state)
143 return -ENOENT;
144
145 mutex_lock(&adev->pm.mutex);
146
147 /* enter BACO state */
148 ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
149
150 mutex_unlock(&adev->pm.mutex);
151
152 return ret;
153 }
154
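/**
 * amdgpu_dpm_baco_exit - Bring the GPU out of the BACO state
 * @adev: amdgpu_device pointer
 *
 * Returns 0 on success, -ENOENT if the backend does not support BACO, or an
 * error code from the backend.
 */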
int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
156 {
157 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
158 void *pp_handle = adev->powerplay.pp_handle;
159 int ret = 0;
160
161 if (!pp_funcs || !pp_funcs->set_asic_baco_state)
162 return -ENOENT;
163
164 mutex_lock(&adev->pm.mutex);
165
166 /* exit BACO state */
167 ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
168
169 mutex_unlock(&adev->pm.mutex);
170
171 return ret;
172 }
173
int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
        enum pp_mp1_state mp1_state)
176 {
177 int ret = 0;
178 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
179
180 if (mp1_state == PP_MP1_STATE_FLR) {
181 /* VF lost access to SMU */
182 if (amdgpu_sriov_vf(adev))
183 adev->pm.dpm_enabled = false;
184 } else if (pp_funcs && pp_funcs->set_mp1_state) {
185 mutex_lock(&adev->pm.mutex);
186
187 ret = pp_funcs->set_mp1_state(
188 adev->powerplay.pp_handle,
189 mp1_state);
190
191 mutex_unlock(&adev->pm.mutex);
192 }
193
194 return ret;
195 }
196
int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
198 {
199 int ret = 0;
200 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
201
202 if (pp_funcs && pp_funcs->notify_rlc_state) {
203 mutex_lock(&adev->pm.mutex);
204
205 ret = pp_funcs->notify_rlc_state(
206 adev->powerplay.pp_handle,
207 en);
208
209 mutex_unlock(&adev->pm.mutex);
210 }
211
212 return ret;
213 }
214
int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
216 {
217 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
218 void *pp_handle = adev->powerplay.pp_handle;
219 int ret;
220
221 if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
222 return 0;
223 /* Don't use baco for reset in S3.
224 * This is a workaround for some platforms
225 * where entering BACO during suspend
226 * seems to cause reboots or hangs.
227 * This might be related to the fact that BACO controls
228 * power to the whole GPU including devices like audio and USB.
229 * Powering down/up everything may adversely affect these other
230 * devices. Needs more investigation.
231 */
232 if (adev->in_s3)
233 return 0;
234
235 mutex_lock(&adev->pm.mutex);
236
237 ret = pp_funcs->get_asic_baco_capability(pp_handle);
238
239 mutex_unlock(&adev->pm.mutex);
240
241 return ret;
242 }
243
int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
245 {
246 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
247 void *pp_handle = adev->powerplay.pp_handle;
248 int ret = 0;
249
250 if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
251 return -ENOENT;
252
253 mutex_lock(&adev->pm.mutex);
254
255 ret = pp_funcs->asic_reset_mode_2(pp_handle);
256
257 mutex_unlock(&adev->pm.mutex);
258
259 return ret;
260 }
261
int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
263 {
264 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
265 void *pp_handle = adev->powerplay.pp_handle;
266 int ret = 0;
267
268 if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
269 return -ENOENT;
270
271 mutex_lock(&adev->pm.mutex);
272
273 ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);
274
275 mutex_unlock(&adev->pm.mutex);
276
277 return ret;
278 }
279
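/**
 * amdgpu_dpm_baco_reset - Reset the GPU by cycling through BACO
 * @adev: amdgpu_device pointer
 *
 * Performs an ASIC reset by entering and then exiting the BACO state.
 */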
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
281 {
282 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
283 void *pp_handle = adev->powerplay.pp_handle;
284 int ret = 0;
285
286 if (!pp_funcs || !pp_funcs->set_asic_baco_state)
287 return -ENOENT;
288
289 mutex_lock(&adev->pm.mutex);
290
291 /* enter BACO state */
292 ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
293 if (ret)
294 goto out;
295
296 /* exit BACO state */
297 ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
298
299 out:
300 mutex_unlock(&adev->pm.mutex);
301 return ret;
302 }
303
bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
305 {
306 struct smu_context *smu = adev->powerplay.pp_handle;
307 bool support_mode1_reset = false;
308
309 if (is_support_sw_smu(adev)) {
310 mutex_lock(&adev->pm.mutex);
311 support_mode1_reset = smu_mode1_reset_is_support(smu);
312 mutex_unlock(&adev->pm.mutex);
313 }
314
315 return support_mode1_reset;
316 }
317
int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
319 {
320 struct smu_context *smu = adev->powerplay.pp_handle;
321 int ret = -EOPNOTSUPP;
322
323 if (is_support_sw_smu(adev)) {
324 mutex_lock(&adev->pm.mutex);
325 ret = smu_mode1_reset(smu);
326 mutex_unlock(&adev->pm.mutex);
327 }
328
329 return ret;
330 }
331
bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev)
333 {
334 struct smu_context *smu = adev->powerplay.pp_handle;
335 bool support_link_reset = false;
336
337 if (is_support_sw_smu(adev)) {
338 mutex_lock(&adev->pm.mutex);
339 support_link_reset = smu_link_reset_is_support(smu);
340 mutex_unlock(&adev->pm.mutex);
341 }
342
343 return support_link_reset;
344 }
345
int amdgpu_dpm_link_reset(struct amdgpu_device *adev)
347 {
348 struct smu_context *smu = adev->powerplay.pp_handle;
349 int ret = -EOPNOTSUPP;
350
351 if (is_support_sw_smu(adev)) {
352 mutex_lock(&adev->pm.mutex);
353 ret = smu_link_reset(smu);
354 mutex_unlock(&adev->pm.mutex);
355 }
356
357 return ret;
358 }
359
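/**
 * amdgpu_dpm_switch_power_profile - Enable or disable a workload power profile
 * @adev: amdgpu_device pointer
 * @type: the PP_SMC_POWER_PROFILE workload profile
 * @en: true to enable the profile, false to disable it
 *
 * No-op on SR-IOV virtual functions or when the backend does not implement
 * the callback.
 */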
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
        enum PP_SMC_POWER_PROFILE type,
        bool en)
363 {
364 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
365 int ret = 0;
366
367 if (amdgpu_sriov_vf(adev))
368 return 0;
369
370 if (pp_funcs && pp_funcs->switch_power_profile) {
371 mutex_lock(&adev->pm.mutex);
372 ret = pp_funcs->switch_power_profile(
373 adev->powerplay.pp_handle, type, en);
374 mutex_unlock(&adev->pm.mutex);
375 }
376
377 return ret;
378 }
379
int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
        bool pause)
382 {
383 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
384 int ret = 0;
385
386 if (amdgpu_sriov_vf(adev))
387 return 0;
388
389 if (pp_funcs && pp_funcs->pause_power_profile) {
390 mutex_lock(&adev->pm.mutex);
391 ret = pp_funcs->pause_power_profile(
392 adev->powerplay.pp_handle, pause);
393 mutex_unlock(&adev->pm.mutex);
394 }
395
396 return ret;
397 }
398
int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
        uint32_t pstate)
401 {
402 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
403 int ret = 0;
404
405 if (pp_funcs && pp_funcs->set_xgmi_pstate) {
406 mutex_lock(&adev->pm.mutex);
407 ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
408 pstate);
409 mutex_unlock(&adev->pm.mutex);
410 }
411
412 return ret;
413 }
414
int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
        uint32_t cstate)
417 {
418 int ret = 0;
419 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
420 void *pp_handle = adev->powerplay.pp_handle;
421
422 if (pp_funcs && pp_funcs->set_df_cstate) {
423 mutex_lock(&adev->pm.mutex);
424 ret = pp_funcs->set_df_cstate(pp_handle, cstate);
425 mutex_unlock(&adev->pm.mutex);
426 }
427
428 return ret;
429 }
430
ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
        enum pp_pm_policy p_type, char *buf)
433 {
434 struct smu_context *smu = adev->powerplay.pp_handle;
435 int ret = -EOPNOTSUPP;
436
437 if (is_support_sw_smu(adev)) {
438 mutex_lock(&adev->pm.mutex);
439 ret = smu_get_pm_policy_info(smu, p_type, buf);
440 mutex_unlock(&adev->pm.mutex);
441 }
442
443 return ret;
444 }
445
int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
        int policy_level)
448 {
449 struct smu_context *smu = adev->powerplay.pp_handle;
450 int ret = -EOPNOTSUPP;
451
452 if (is_support_sw_smu(adev)) {
453 mutex_lock(&adev->pm.mutex);
454 ret = smu_set_pm_policy(smu, policy_type, policy_level);
455 mutex_unlock(&adev->pm.mutex);
456 }
457
458 return ret;
459 }
460
int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
462 {
463 void *pp_handle = adev->powerplay.pp_handle;
464 const struct amd_pm_funcs *pp_funcs =
465 adev->powerplay.pp_funcs;
466 int ret = 0;
467
468 if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
469 mutex_lock(&adev->pm.mutex);
470 ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
471 mutex_unlock(&adev->pm.mutex);
472 }
473
474 return ret;
475 }
476
int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
        uint32_t msg_id)
479 {
480 void *pp_handle = adev->powerplay.pp_handle;
481 const struct amd_pm_funcs *pp_funcs =
482 adev->powerplay.pp_funcs;
483 int ret = 0;
484
485 if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
486 mutex_lock(&adev->pm.mutex);
487 ret = pp_funcs->set_clockgating_by_smu(pp_handle,
488 msg_id);
489 mutex_unlock(&adev->pm.mutex);
490 }
491
492 return ret;
493 }
494
int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
        bool acquire)
497 {
498 void *pp_handle = adev->powerplay.pp_handle;
499 const struct amd_pm_funcs *pp_funcs =
500 adev->powerplay.pp_funcs;
501 int ret = -EOPNOTSUPP;
502
503 if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
504 mutex_lock(&adev->pm.mutex);
505 ret = pp_funcs->smu_i2c_bus_access(pp_handle,
506 acquire);
507 mutex_unlock(&adev->pm.mutex);
508 }
509
510 return ret;
511 }
512
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
514 {
515 if (adev->pm.dpm_enabled) {
516 mutex_lock(&adev->pm.mutex);
517 if (power_supply_is_system_supplied() > 0)
518 adev->pm.ac_power = true;
519 else
520 adev->pm.ac_power = false;
521
522 if (adev->powerplay.pp_funcs &&
523 adev->powerplay.pp_funcs->enable_bapm)
524 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
525
526 if (is_support_sw_smu(adev))
527 smu_set_ac_dc(adev->powerplay.pp_handle);
528
529 mutex_unlock(&adev->pm.mutex);
530 }
531 }
532
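/**
 * amdgpu_dpm_read_sensor - Read a power management sensor value
 * @adev: amdgpu_device pointer
 * @sensor: which sensor to read
 * @data: buffer that receives the sensor value
 * @size: in: size of @data in bytes, out: size of the data written
 *
 * Returns 0 on success, or -EINVAL on invalid arguments or when the backend
 * does not implement the callback.
 */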
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
        void *data, uint32_t *size)
535 {
536 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
537 int ret = -EINVAL;
538
539 if (!data || !size)
540 return -EINVAL;
541
542 if (pp_funcs && pp_funcs->read_sensor) {
543 mutex_lock(&adev->pm.mutex);
544 ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
545 sensor,
546 data,
547 size);
548 mutex_unlock(&adev->pm.mutex);
549 }
550
551 return ret;
552 }
553
int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
555 {
556 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
557 int ret = -EOPNOTSUPP;
558
559 if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
560 mutex_lock(&adev->pm.mutex);
561 ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
562 mutex_unlock(&adev->pm.mutex);
563 }
564
565 return ret;
566 }
567
int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
569 {
570 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
571 int ret = -EOPNOTSUPP;
572
573 if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
574 mutex_lock(&adev->pm.mutex);
575 ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
576 mutex_unlock(&adev->pm.mutex);
577 }
578
579 return ret;
580 }
581
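/**
 * amdgpu_dpm_compute_clocks - Re-evaluate and apply the current clock state
 * @adev: amdgpu_device pointer
 *
 * Updates display bandwidth requirements, waits for the active rings to go
 * idle and asks the powerplay backend to recompute the clocks. Does nothing
 * if DPM is disabled or the backend lacks the callback.
 */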
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
583 {
584 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
585 int i;
586
587 if (!adev->pm.dpm_enabled)
588 return;
589
590 if (!pp_funcs->pm_compute_clocks)
591 return;
592
593 if (adev->mode_info.num_crtc)
594 amdgpu_display_bandwidth_update(adev);
595
596 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
597 struct amdgpu_ring *ring = adev->rings[i];
598 if (ring && ring->sched.ready)
599 amdgpu_fence_wait_empty(ring);
600 }
601
602 mutex_lock(&adev->pm.mutex);
603 pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
604 mutex_unlock(&adev->pm.mutex);
605 }
606
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
608 {
609 int ret = 0;
610
611 if (adev->family == AMDGPU_FAMILY_SI) {
612 mutex_lock(&adev->pm.mutex);
613 if (enable) {
614 adev->pm.dpm.uvd_active = true;
615 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
616 } else {
617 adev->pm.dpm.uvd_active = false;
618 }
619 mutex_unlock(&adev->pm.mutex);
620
621 amdgpu_dpm_compute_clocks(adev);
622 return;
623 }
624
625 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable, 0);
626 if (ret)
627 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
628 enable ? "enable" : "disable", ret);
629 }
630
void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst)
632 {
633 int ret = 0;
634
635 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, !enable, inst);
636 if (ret)
DRM_ERROR("Dpm %s vcn failed, ret = %d.\n",
638 enable ? "enable" : "disable", ret);
639 }
640
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
642 {
643 int ret = 0;
644
645 if (adev->family == AMDGPU_FAMILY_SI) {
646 mutex_lock(&adev->pm.mutex);
647 if (enable) {
648 adev->pm.dpm.vce_active = true;
649 /* XXX select vce level based on ring/task */
650 adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
651 } else {
652 adev->pm.dpm.vce_active = false;
653 }
654 mutex_unlock(&adev->pm.mutex);
655
656 amdgpu_dpm_compute_clocks(adev);
657 return;
658 }
659
660 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable, 0);
661 if (ret)
662 DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
663 enable ? "enable" : "disable", ret);
664 }
665
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
667 {
668 int ret = 0;
669
670 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable, 0);
671 if (ret)
672 DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
673 enable ? "enable" : "disable", ret);
674 }
675
void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
677 {
678 int ret = 0;
679
680 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable, 0);
681 if (ret)
682 DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
683 enable ? "enable" : "disable", ret);
684 }
685
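/**
 * amdgpu_pm_load_smu_firmware - Load the SMU firmware
 * @adev: amdgpu_device pointer
 * @smu_version: optional output for the loaded SMU firmware version
 *
 * Asks the powerplay backend to load the SMU firmware and, on success,
 * reports the firmware version through @smu_version if provided. Returns 0
 * when nothing needs to be done (no callback, or software SMU on an APU).
 */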
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
687 {
688 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
689 int r = 0;
690
691 if (!pp_funcs || !pp_funcs->load_firmware ||
692 (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU)))
693 return 0;
694
695 mutex_lock(&adev->pm.mutex);
696 r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
697 if (r) {
698 pr_err("smu firmware loading failed\n");
699 goto out;
700 }
701
702 if (smu_version)
703 *smu_version = adev->pm.fw_version;
704
705 out:
706 mutex_unlock(&adev->pm.mutex);
707 return r;
708 }
709
int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
711 {
712 int ret = 0;
713
714 if (is_support_sw_smu(adev)) {
715 mutex_lock(&adev->pm.mutex);
716 ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
717 enable);
718 mutex_unlock(&adev->pm.mutex);
719 }
720
721 return ret;
722 }
723
int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
725 {
726 struct smu_context *smu = adev->powerplay.pp_handle;
727 int ret = 0;
728
729 if (!is_support_sw_smu(adev))
730 return -EOPNOTSUPP;
731
732 mutex_lock(&adev->pm.mutex);
733 ret = smu_send_hbm_bad_pages_num(smu, size);
734 mutex_unlock(&adev->pm.mutex);
735
736 return ret;
737 }
738
int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
740 {
741 struct smu_context *smu = adev->powerplay.pp_handle;
742 int ret = 0;
743
744 if (!is_support_sw_smu(adev))
745 return -EOPNOTSUPP;
746
747 mutex_lock(&adev->pm.mutex);
748 ret = smu_send_hbm_bad_channel_flag(smu, size);
749 mutex_unlock(&adev->pm.mutex);
750
751 return ret;
752 }
753
int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
755 {
756 struct smu_context *smu = adev->powerplay.pp_handle;
757 int ret;
758
759 if (!is_support_sw_smu(adev))
760 return -EOPNOTSUPP;
761
762 mutex_lock(&adev->pm.mutex);
763 ret = smu_send_rma_reason(smu);
764 mutex_unlock(&adev->pm.mutex);
765
766 if (adev->cper.enabled)
767 if (amdgpu_cper_generate_bp_threshold_record(adev))
768 dev_warn(adev->dev, "fail to generate bad page threshold cper records\n");
769
770 return ret;
771 }
772
773 /**
774 * amdgpu_dpm_reset_sdma_is_supported - Check if SDMA reset is supported
775 * @adev: amdgpu_device pointer
776 *
777 * This function checks if the SMU supports resetting the SDMA engine.
 * Returns false if the ASIC does not use the software SMU or if the
 * SMU does not support SDMA reset.
780 */
bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev)
782 {
783 struct smu_context *smu = adev->powerplay.pp_handle;
784 bool ret;
785
786 if (!is_support_sw_smu(adev))
787 return false;
788
789 mutex_lock(&adev->pm.mutex);
790 ret = smu_reset_sdma_is_supported(smu);
791 mutex_unlock(&adev->pm.mutex);
792
793 return ret;
794 }
795
int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
797 {
798 struct smu_context *smu = adev->powerplay.pp_handle;
799 int ret;
800
801 if (!is_support_sw_smu(adev))
802 return -EOPNOTSUPP;
803
804 mutex_lock(&adev->pm.mutex);
805 ret = smu_reset_sdma(smu, inst_mask);
806 mutex_unlock(&adev->pm.mutex);
807
808 return ret;
809 }
810
int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask)
812 {
813 struct smu_context *smu = adev->powerplay.pp_handle;
814 int ret;
815
816 if (!is_support_sw_smu(adev))
817 return -EOPNOTSUPP;
818
819 mutex_lock(&adev->pm.mutex);
820 ret = smu_reset_vcn(smu, inst_mask);
821 mutex_unlock(&adev->pm.mutex);
822
823 return ret;
824 }
825
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
        enum pp_clock_type type,
        uint32_t *min,
        uint32_t *max)
830 {
831 int ret = 0;
832
833 if (type != PP_SCLK)
834 return -EINVAL;
835
836 if (!is_support_sw_smu(adev))
837 return -EOPNOTSUPP;
838
839 mutex_lock(&adev->pm.mutex);
840 ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
841 SMU_SCLK,
842 min,
843 max);
844 mutex_unlock(&adev->pm.mutex);
845
846 return ret;
847 }
848
int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
        enum pp_clock_type type,
        uint32_t min,
        uint32_t max)
853 {
854 struct smu_context *smu = adev->powerplay.pp_handle;
855 int ret = 0;
856
857 if (type != PP_SCLK)
858 return -EINVAL;
859
860 if (!is_support_sw_smu(adev))
861 return -EOPNOTSUPP;
862
863 mutex_lock(&adev->pm.mutex);
864 ret = smu_set_soft_freq_range(smu,
865 SMU_SCLK,
866 min,
867 max);
868 mutex_unlock(&adev->pm.mutex);
869
870 return ret;
871 }
872
int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
874 {
875 struct smu_context *smu = adev->powerplay.pp_handle;
876 int ret = 0;
877
878 if (!is_support_sw_smu(adev))
879 return 0;
880
881 mutex_lock(&adev->pm.mutex);
882 ret = smu_write_watermarks_table(smu);
883 mutex_unlock(&adev->pm.mutex);
884
885 return ret;
886 }
887
int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
        enum smu_event_type event,
        uint64_t event_arg)
891 {
892 struct smu_context *smu = adev->powerplay.pp_handle;
893 int ret = 0;
894
895 if (!is_support_sw_smu(adev))
896 return -EOPNOTSUPP;
897
898 mutex_lock(&adev->pm.mutex);
899 ret = smu_wait_for_event(smu, event, event_arg);
900 mutex_unlock(&adev->pm.mutex);
901
902 return ret;
903 }
904
int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
906 {
907 struct smu_context *smu = adev->powerplay.pp_handle;
908 int ret = 0;
909
910 if (!is_support_sw_smu(adev))
911 return -EOPNOTSUPP;
912
913 mutex_lock(&adev->pm.mutex);
914 ret = smu_set_residency_gfxoff(smu, value);
915 mutex_unlock(&adev->pm.mutex);
916
917 return ret;
918 }
919
int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
921 {
922 struct smu_context *smu = adev->powerplay.pp_handle;
923 int ret = 0;
924
925 if (!is_support_sw_smu(adev))
926 return -EOPNOTSUPP;
927
928 mutex_lock(&adev->pm.mutex);
929 ret = smu_get_residency_gfxoff(smu, value);
930 mutex_unlock(&adev->pm.mutex);
931
932 return ret;
933 }
934
int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
936 {
937 struct smu_context *smu = adev->powerplay.pp_handle;
938 int ret = 0;
939
940 if (!is_support_sw_smu(adev))
941 return -EOPNOTSUPP;
942
943 mutex_lock(&adev->pm.mutex);
944 ret = smu_get_entrycount_gfxoff(smu, value);
945 mutex_unlock(&adev->pm.mutex);
946
947 return ret;
948 }
949
int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
951 {
952 struct smu_context *smu = adev->powerplay.pp_handle;
953 int ret = 0;
954
955 if (!is_support_sw_smu(adev))
956 return -EOPNOTSUPP;
957
958 mutex_lock(&adev->pm.mutex);
959 ret = smu_get_status_gfxoff(smu, value);
960 mutex_unlock(&adev->pm.mutex);
961
962 return ret;
963 }
964
uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
966 {
967 struct smu_context *smu = adev->powerplay.pp_handle;
968
969 if (!is_support_sw_smu(adev))
970 return 0;
971
972 return atomic64_read(&smu->throttle_int_counter);
973 }
974
/**
 * amdgpu_dpm_gfx_state_change - Handle a gfx power state change request
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
        enum gfx_change_state state)
982 {
983 mutex_lock(&adev->pm.mutex);
984 if (adev->powerplay.pp_funcs &&
985 adev->powerplay.pp_funcs->gfx_state_change_set)
986 ((adev)->powerplay.pp_funcs->gfx_state_change_set(
987 (adev)->powerplay.pp_handle, state));
988 mutex_unlock(&adev->pm.mutex);
989 }
990
int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
        void *umc_ecc)
993 {
994 struct smu_context *smu = adev->powerplay.pp_handle;
995 int ret = 0;
996
997 if (!is_support_sw_smu(adev))
998 return -EOPNOTSUPP;
999
1000 mutex_lock(&adev->pm.mutex);
1001 ret = smu_get_ecc_info(smu, umc_ecc);
1002 mutex_unlock(&adev->pm.mutex);
1003
1004 return ret;
1005 }
1006
struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
        uint32_t idx)
1009 {
1010 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1011 struct amd_vce_state *vstate = NULL;
1012
1013 if (!pp_funcs->get_vce_clock_state)
1014 return NULL;
1015
1016 mutex_lock(&adev->pm.mutex);
1017 vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
1018 idx);
1019 mutex_unlock(&adev->pm.mutex);
1020
1021 return vstate;
1022 }
1023
void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
        enum amd_pm_state_type *state)
1026 {
1027 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1028
1029 mutex_lock(&adev->pm.mutex);
1030
1031 if (!pp_funcs->get_current_power_state) {
1032 *state = adev->pm.dpm.user_state;
1033 goto out;
1034 }
1035
1036 *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
1037 if (*state < POWER_STATE_TYPE_DEFAULT ||
1038 *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
1039 *state = adev->pm.dpm.user_state;
1040
1041 out:
1042 mutex_unlock(&adev->pm.mutex);
1043 }
1044
void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
        enum amd_pm_state_type state)
1047 {
1048 mutex_lock(&adev->pm.mutex);
1049 adev->pm.dpm.user_state = state;
1050 mutex_unlock(&adev->pm.mutex);
1051
1052 if (is_support_sw_smu(adev))
1053 return;
1054
1055 if (amdgpu_dpm_dispatch_task(adev,
1056 AMD_PP_TASK_ENABLE_USER_STATE,
1057 &state) == -EOPNOTSUPP)
1058 amdgpu_dpm_compute_clocks(adev);
1059 }
1060
enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
1062 {
1063 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1064 enum amd_dpm_forced_level level;
1065
1066 if (!pp_funcs)
1067 return AMD_DPM_FORCED_LEVEL_AUTO;
1068
1069 mutex_lock(&adev->pm.mutex);
1070 if (pp_funcs->get_performance_level)
1071 level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
1072 else
1073 level = adev->pm.dpm.forced_level;
1074 mutex_unlock(&adev->pm.mutex);
1075
1076 return level;
1077 }
1078
static void amdgpu_dpm_enter_umd_state(struct amdgpu_device *adev)
1080 {
1081 /* enter UMD Pstate */
1082 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
1083 AMD_PG_STATE_UNGATE);
1084 amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
1085 AMD_CG_STATE_UNGATE);
1086 }
1087
static void amdgpu_dpm_exit_umd_state(struct amdgpu_device *adev)
1089 {
1090 /* exit UMD Pstate */
1091 amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
1092 AMD_CG_STATE_GATE);
1093 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
1094 AMD_PG_STATE_GATE);
1095 }
1096
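/**
 * amdgpu_dpm_force_performance_level - Force a DPM performance level
 * @adev: amdgpu_device pointer
 * @level: the amd_dpm_forced_level to apply
 *
 * Handles the transitions into and out of the UMD pstate profile levels and
 * rolls the UMD state back if the backend rejects the new level. Returns 0
 * on success or if no backend callback is present, and -EINVAL if thermal
 * throttling is active, the transition is not allowed, or the backend
 * rejects the level.
 */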
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
        enum amd_dpm_forced_level level)
1099 {
1100 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1101 enum amd_dpm_forced_level current_level;
1102 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1103 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1104 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1105 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1106
1107 if (!pp_funcs || !pp_funcs->force_performance_level)
1108 return 0;
1109
1110 if (adev->pm.dpm.thermal_active)
1111 return -EINVAL;
1112
1113 current_level = amdgpu_dpm_get_performance_level(adev);
1114 if (current_level == level)
1115 return 0;
1116
1117 if (!(current_level & profile_mode_mask) &&
1118 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
1119 return -EINVAL;
1120
1121 if (adev->asic_type == CHIP_RAVEN) {
1122 if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
1123 if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
1124 level == AMD_DPM_FORCED_LEVEL_MANUAL)
1125 amdgpu_gfx_off_ctrl(adev, false);
1126 else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
1127 level != AMD_DPM_FORCED_LEVEL_MANUAL)
1128 amdgpu_gfx_off_ctrl(adev, true);
1129 }
1130 }
1131
1132 if (!(current_level & profile_mode_mask) && (level & profile_mode_mask))
1133 amdgpu_dpm_enter_umd_state(adev);
1134 else if ((current_level & profile_mode_mask) &&
1135 !(level & profile_mode_mask))
1136 amdgpu_dpm_exit_umd_state(adev);
1137
1138 mutex_lock(&adev->pm.mutex);
1139
1140 if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
1141 level)) {
1142 mutex_unlock(&adev->pm.mutex);
1143 /* If new level failed, retain the umd state as before */
1144 if (!(current_level & profile_mode_mask) &&
1145 (level & profile_mode_mask))
1146 amdgpu_dpm_exit_umd_state(adev);
1147 else if ((current_level & profile_mode_mask) &&
1148 !(level & profile_mode_mask))
1149 amdgpu_dpm_enter_umd_state(adev);
1150
1151 return -EINVAL;
1152 }
1153
1154 adev->pm.dpm.forced_level = level;
1155
1156 mutex_unlock(&adev->pm.mutex);
1157
1158 return 0;
1159 }
1160
int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
        struct pp_states_info *states)
1163 {
1164 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1165 int ret = 0;
1166
1167 if (!pp_funcs->get_pp_num_states)
1168 return -EOPNOTSUPP;
1169
1170 mutex_lock(&adev->pm.mutex);
1171 ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
1172 states);
1173 mutex_unlock(&adev->pm.mutex);
1174
1175 return ret;
1176 }
1177
int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
        enum amd_pp_task task_id,
        enum amd_pm_state_type *user_state)
1181 {
1182 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1183 int ret = 0;
1184
1185 if (!pp_funcs->dispatch_tasks)
1186 return -EOPNOTSUPP;
1187
1188 mutex_lock(&adev->pm.mutex);
1189 ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
1190 task_id,
1191 user_state);
1192 mutex_unlock(&adev->pm.mutex);
1193
1194 return ret;
1195 }
1196
int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
1198 {
1199 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1200 int ret = 0;
1201
1202 if (!pp_funcs->get_pp_table)
1203 return 0;
1204
1205 mutex_lock(&adev->pm.mutex);
1206 ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
1207 table);
1208 mutex_unlock(&adev->pm.mutex);
1209
1210 return ret;
1211 }
1212
int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
        uint32_t type,
        long *input,
        uint32_t size)
1217 {
1218 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1219 int ret = 0;
1220
1221 if (!pp_funcs->set_fine_grain_clk_vol)
1222 return 0;
1223
1224 mutex_lock(&adev->pm.mutex);
1225 ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
1226 type,
1227 input,
1228 size);
1229 mutex_unlock(&adev->pm.mutex);
1230
1231 return ret;
1232 }
1233
int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
        uint32_t type,
        long *input,
        uint32_t size)
1238 {
1239 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1240 int ret = 0;
1241
1242 if (!pp_funcs->odn_edit_dpm_table)
1243 return 0;
1244
1245 mutex_lock(&adev->pm.mutex);
1246 ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
1247 type,
1248 input,
1249 size);
1250 mutex_unlock(&adev->pm.mutex);
1251
1252 return ret;
1253 }
1254
int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
        enum pp_clock_type type,
        char *buf)
1258 {
1259 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1260 int ret = 0;
1261
1262 if (!pp_funcs->print_clock_levels)
1263 return 0;
1264
1265 mutex_lock(&adev->pm.mutex);
1266 ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
1267 type,
1268 buf);
1269 mutex_unlock(&adev->pm.mutex);
1270
1271 return ret;
1272 }
1273
int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
        enum pp_clock_type type,
        char *buf,
        int *offset)
1278 {
1279 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1280 int ret = 0;
1281
1282 if (!pp_funcs->emit_clock_levels)
1283 return -ENOENT;
1284
1285 mutex_lock(&adev->pm.mutex);
1286 ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
1287 type,
1288 buf,
1289 offset);
1290 mutex_unlock(&adev->pm.mutex);
1291
1292 return ret;
1293 }
1294
int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
        uint64_t ppfeature_masks)
1297 {
1298 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1299 int ret = 0;
1300
1301 if (!pp_funcs->set_ppfeature_status)
1302 return 0;
1303
1304 mutex_lock(&adev->pm.mutex);
1305 ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
1306 ppfeature_masks);
1307 mutex_unlock(&adev->pm.mutex);
1308
1309 return ret;
1310 }
1311
int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
1313 {
1314 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1315 int ret = 0;
1316
1317 if (!pp_funcs->get_ppfeature_status)
1318 return 0;
1319
1320 mutex_lock(&adev->pm.mutex);
1321 ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
1322 buf);
1323 mutex_unlock(&adev->pm.mutex);
1324
1325 return ret;
1326 }
1327
int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
        enum pp_clock_type type,
        uint32_t mask)
1331 {
1332 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1333 int ret = 0;
1334
1335 if (!pp_funcs->force_clock_level)
1336 return 0;
1337
1338 mutex_lock(&adev->pm.mutex);
1339 ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
1340 type,
1341 mask);
1342 mutex_unlock(&adev->pm.mutex);
1343
1344 return ret;
1345 }
1346
int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
1348 {
1349 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1350 int ret = 0;
1351
1352 if (!pp_funcs->get_sclk_od)
1353 return -EOPNOTSUPP;
1354
1355 mutex_lock(&adev->pm.mutex);
1356 ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
1357 mutex_unlock(&adev->pm.mutex);
1358
1359 return ret;
1360 }
1361
int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
1363 {
1364 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1365
1366 if (is_support_sw_smu(adev))
1367 return -EOPNOTSUPP;
1368
1369 mutex_lock(&adev->pm.mutex);
1370 if (pp_funcs->set_sclk_od)
1371 pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
1372 mutex_unlock(&adev->pm.mutex);
1373
1374 if (amdgpu_dpm_dispatch_task(adev,
1375 AMD_PP_TASK_READJUST_POWER_STATE,
1376 NULL) == -EOPNOTSUPP) {
1377 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1378 amdgpu_dpm_compute_clocks(adev);
1379 }
1380
1381 return 0;
1382 }
1383
int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
1385 {
1386 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1387 int ret = 0;
1388
1389 if (!pp_funcs->get_mclk_od)
1390 return -EOPNOTSUPP;
1391
1392 mutex_lock(&adev->pm.mutex);
1393 ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
1394 mutex_unlock(&adev->pm.mutex);
1395
1396 return ret;
1397 }
1398
int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
1400 {
1401 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1402
1403 if (is_support_sw_smu(adev))
1404 return -EOPNOTSUPP;
1405
1406 mutex_lock(&adev->pm.mutex);
1407 if (pp_funcs->set_mclk_od)
1408 pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
1409 mutex_unlock(&adev->pm.mutex);
1410
1411 if (amdgpu_dpm_dispatch_task(adev,
1412 AMD_PP_TASK_READJUST_POWER_STATE,
1413 NULL) == -EOPNOTSUPP) {
1414 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1415 amdgpu_dpm_compute_clocks(adev);
1416 }
1417
1418 return 0;
1419 }
1420
int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
        char *buf)
1423 {
1424 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1425 int ret = 0;
1426
1427 if (!pp_funcs->get_power_profile_mode)
1428 return -EOPNOTSUPP;
1429
1430 mutex_lock(&adev->pm.mutex);
1431 ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
1432 buf);
1433 mutex_unlock(&adev->pm.mutex);
1434
1435 return ret;
1436 }
1437
int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
        long *input, uint32_t size)
1440 {
1441 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1442 int ret = 0;
1443
1444 if (!pp_funcs->set_power_profile_mode)
1445 return 0;
1446
1447 mutex_lock(&adev->pm.mutex);
1448 ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
1449 input,
1450 size);
1451 mutex_unlock(&adev->pm.mutex);
1452
1453 return ret;
1454 }
1455
int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
1457 {
1458 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1459 int ret = 0;
1460
1461 if (!pp_funcs->get_gpu_metrics)
1462 return 0;
1463
1464 mutex_lock(&adev->pm.mutex);
1465 ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
1466 table);
1467 mutex_unlock(&adev->pm.mutex);
1468
1469 return ret;
1470 }
1471
ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
        size_t size)
1474 {
1475 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1476 int ret = 0;
1477
1478 if (!pp_funcs->get_pm_metrics)
1479 return -EOPNOTSUPP;
1480
1481 mutex_lock(&adev->pm.mutex);
1482 ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
1483 size);
1484 mutex_unlock(&adev->pm.mutex);
1485
1486 return ret;
1487 }
1488
int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
        uint32_t *fan_mode)
1491 {
1492 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1493 int ret = 0;
1494
1495 if (!pp_funcs->get_fan_control_mode)
1496 return -EOPNOTSUPP;
1497
1498 mutex_lock(&adev->pm.mutex);
1499 ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
1500 fan_mode);
1501 mutex_unlock(&adev->pm.mutex);
1502
1503 return ret;
1504 }
1505
int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
        uint32_t speed)
1508 {
1509 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1510 int ret = 0;
1511
1512 if (!pp_funcs->set_fan_speed_pwm)
1513 return -EOPNOTSUPP;
1514
1515 mutex_lock(&adev->pm.mutex);
1516 ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
1517 speed);
1518 mutex_unlock(&adev->pm.mutex);
1519
1520 return ret;
1521 }
1522
int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
        uint32_t *speed)
1525 {
1526 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1527 int ret = 0;
1528
1529 if (!pp_funcs->get_fan_speed_pwm)
1530 return -EOPNOTSUPP;
1531
1532 mutex_lock(&adev->pm.mutex);
1533 ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
1534 speed);
1535 mutex_unlock(&adev->pm.mutex);
1536
1537 return ret;
1538 }
1539
int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
        uint32_t *speed)
1542 {
1543 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1544 int ret = 0;
1545
1546 if (!pp_funcs->get_fan_speed_rpm)
1547 return -EOPNOTSUPP;
1548
1549 mutex_lock(&adev->pm.mutex);
1550 ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
1551 speed);
1552 mutex_unlock(&adev->pm.mutex);
1553
1554 return ret;
1555 }
1556
int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
        uint32_t speed)
1559 {
1560 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1561 int ret = 0;
1562
1563 if (!pp_funcs->set_fan_speed_rpm)
1564 return -EOPNOTSUPP;
1565
1566 mutex_lock(&adev->pm.mutex);
1567 ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
1568 speed);
1569 mutex_unlock(&adev->pm.mutex);
1570
1571 return ret;
1572 }
1573
int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
        uint32_t mode)
1576 {
1577 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1578 int ret = 0;
1579
1580 if (!pp_funcs->set_fan_control_mode)
1581 return -EOPNOTSUPP;
1582
1583 mutex_lock(&adev->pm.mutex);
1584 ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
1585 mode);
1586 mutex_unlock(&adev->pm.mutex);
1587
1588 return ret;
1589 }
1590
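/**
 * amdgpu_dpm_get_power_limit - Query a power limit from the PM backend
 * @adev: amdgpu_device pointer
 * @limit: output for the power limit value
 * @pp_limit_level: which limit to query (e.g. current, default, min or max)
 * @power_type: which type of power limit to query
 *
 * Returns 0 on success, or -ENODATA if the backend does not implement the
 * callback.
 */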
int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
        uint32_t *limit,
        enum pp_power_limit_level pp_limit_level,
        enum pp_power_type power_type)
1595 {
1596 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1597 int ret = 0;
1598
1599 if (!pp_funcs->get_power_limit)
1600 return -ENODATA;
1601
1602 mutex_lock(&adev->pm.mutex);
1603 ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
1604 limit,
1605 pp_limit_level,
1606 power_type);
1607 mutex_unlock(&adev->pm.mutex);
1608
1609 return ret;
1610 }
1611
int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
        uint32_t limit)
1614 {
1615 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1616 int ret = 0;
1617
1618 if (!pp_funcs->set_power_limit)
1619 return -EINVAL;
1620
1621 mutex_lock(&adev->pm.mutex);
1622 ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
1623 limit);
1624 mutex_unlock(&adev->pm.mutex);
1625
1626 return ret;
1627 }
1628
int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
1630 {
1631 bool cclk_dpm_supported = false;
1632
1633 if (!is_support_sw_smu(adev))
1634 return false;
1635
1636 mutex_lock(&adev->pm.mutex);
1637 cclk_dpm_supported = is_support_cclk_dpm(adev);
1638 mutex_unlock(&adev->pm.mutex);
1639
1640 return (int)cclk_dpm_supported;
1641 }
1642
int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
        struct seq_file *m)
1645 {
1646 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1647
1648 if (!pp_funcs->debugfs_print_current_performance_level)
1649 return -EOPNOTSUPP;
1650
1651 mutex_lock(&adev->pm.mutex);
1652 pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
1653 m);
1654 mutex_unlock(&adev->pm.mutex);
1655
1656 return 0;
1657 }
1658
int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
        void **addr,
        size_t *size)
1662 {
1663 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1664 int ret = 0;
1665
1666 if (!pp_funcs->get_smu_prv_buf_details)
1667 return -ENOSYS;
1668
1669 mutex_lock(&adev->pm.mutex);
1670 ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
1671 addr,
1672 size);
1673 mutex_unlock(&adev->pm.mutex);
1674
1675 return ret;
1676 }
1677
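/**
 * amdgpu_dpm_is_overdrive_supported - Check whether OverDrive is available
 * @adev: amdgpu_device pointer
 *
 * With the software SMU, support is reported when overdrive is enabled or
 * the device is an APU; legacy DPM implementations always report no support.
 */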
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
1679 {
1680 if (is_support_sw_smu(adev)) {
1681 struct smu_context *smu = adev->powerplay.pp_handle;
1682
1683 return (smu->od_enabled || smu->is_apu);
1684 } else {
1685 struct pp_hwmgr *hwmgr;
1686
1687 /*
 * DPM on some legacy ASICs doesn't carry an od_enabled member
 * as its pp_handle is cast directly from adev.
1690 */
1691 if (amdgpu_dpm_is_legacy_dpm(adev))
1692 return false;
1693
1694 hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
1695
1696 return hwmgr->od_enabled;
1697 }
1698 }
1699
int amdgpu_dpm_is_overdrive_enabled(struct amdgpu_device *adev)
1701 {
1702 if (is_support_sw_smu(adev)) {
1703 struct smu_context *smu = adev->powerplay.pp_handle;
1704
1705 return smu->od_enabled;
1706 } else {
1707 struct pp_hwmgr *hwmgr;
1708
1709 /*
 * DPM on some legacy ASICs doesn't carry an od_enabled member
 * as its pp_handle is cast directly from adev.
1712 */
1713 if (amdgpu_dpm_is_legacy_dpm(adev))
1714 return false;
1715
1716 hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
1717
1718 return hwmgr->od_enabled;
1719 }
1720 }
1721
int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
        const char *buf,
        size_t size)
1725 {
1726 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1727 int ret = 0;
1728
1729 if (!pp_funcs->set_pp_table)
1730 return -EOPNOTSUPP;
1731
1732 mutex_lock(&adev->pm.mutex);
1733 ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
1734 buf,
1735 size);
1736 mutex_unlock(&adev->pm.mutex);
1737
1738 return ret;
1739 }
1740
int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
1742 {
1743 struct smu_context *smu = adev->powerplay.pp_handle;
1744
1745 if (!is_support_sw_smu(adev))
1746 return INT_MAX;
1747
1748 return smu->cpu_core_num;
1749 }
1750
void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
1752 {
1753 if (!is_support_sw_smu(adev))
1754 return;
1755
1756 amdgpu_smu_stb_debug_fs_init(adev);
1757 }
1758
int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
        const struct amd_pp_display_configuration *input)
1761 {
1762 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1763 int ret = 0;
1764
1765 if (!pp_funcs->display_configuration_change)
1766 return 0;
1767
1768 mutex_lock(&adev->pm.mutex);
1769 ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
1770 input);
1771 mutex_unlock(&adev->pm.mutex);
1772
1773 return ret;
1774 }
1775
int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
        enum amd_pp_clock_type type,
        struct amd_pp_clocks *clocks)
1779 {
1780 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1781 int ret = 0;
1782
1783 if (!pp_funcs->get_clock_by_type)
1784 return 0;
1785
1786 mutex_lock(&adev->pm.mutex);
1787 ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
1788 type,
1789 clocks);
1790 mutex_unlock(&adev->pm.mutex);
1791
1792 return ret;
1793 }
1794
int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
        struct amd_pp_simple_clock_info *clocks)
1797 {
1798 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1799 int ret = 0;
1800
1801 if (!pp_funcs->get_display_mode_validation_clocks)
1802 return 0;
1803
1804 mutex_lock(&adev->pm.mutex);
1805 ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
1806 clocks);
1807 mutex_unlock(&adev->pm.mutex);
1808
1809 return ret;
1810 }
1811
int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
        enum amd_pp_clock_type type,
        struct pp_clock_levels_with_latency *clocks)
1815 {
1816 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1817 int ret = 0;
1818
1819 if (!pp_funcs->get_clock_by_type_with_latency)
1820 return 0;
1821
1822 mutex_lock(&adev->pm.mutex);
1823 ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
1824 type,
1825 clocks);
1826 mutex_unlock(&adev->pm.mutex);
1827
1828 return ret;
1829 }
1830
int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
        enum amd_pp_clock_type type,
        struct pp_clock_levels_with_voltage *clocks)
1834 {
1835 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1836 int ret = 0;
1837
1838 if (!pp_funcs->get_clock_by_type_with_voltage)
1839 return 0;
1840
1841 mutex_lock(&adev->pm.mutex);
1842 ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
1843 type,
1844 clocks);
1845 mutex_unlock(&adev->pm.mutex);
1846
1847 return ret;
1848 }
1849
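/**
 * amdgpu_dpm_set_watermarks_for_clocks_ranges - Program display watermarks for
 * the given clock ranges
 * @adev: Pointer to the device.
 * @clock_ranges: Opaque watermark/clock-range table passed through to the
 * powerplay backend.
 *
 * Return: -EOPNOTSUPP if the callback is not implemented, otherwise the value
 * returned by the powerplay ->set_watermarks_for_clocks_ranges callback.
 */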
int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

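/**
 * amdgpu_dpm_display_clock_voltage_request - Forward a display clock request
 * @adev: Pointer to the device.
 * @clock: Display clock request passed through to the powerplay backend.
 *
 * Return: -EOPNOTSUPP if the callback is not implemented, otherwise the value
 * returned by the powerplay ->display_clock_voltage_request callback.
 */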
int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

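/**
 * amdgpu_dpm_notify_smu_enable_pwe - Ask the SMU to enable PWE
 * @adev: Pointer to the device.
 *
 * Forwards the request to the powerplay ->notify_smu_enable_pwe callback
 * under adev->pm.mutex; silently does nothing if the callback is not
 * implemented.
 */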
void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

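/**
 * amdgpu_dpm_set_min_deep_sleep_dcefclk - Set the minimum deep-sleep DCEF clock
 * @adev: Pointer to the device.
 * @clock: Requested minimum deep-sleep DCEF clock value.
 *
 * Return: -EOPNOTSUPP if the callback is not implemented, otherwise the value
 * returned by the powerplay ->set_min_deep_sleep_dcefclk callback.
 */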
int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

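/**
 * amdgpu_dpm_get_max_sustainable_clocks_by_dc - Query the maximum sustainable
 * clocks reported to the display code
 * @adev: Pointer to the device.
 * @max_clocks: Buffer that receives the maximum sustainable clock values.
 *
 * Return: -EOPNOTSUPP if the callback is not implemented, otherwise the value
 * returned by the powerplay ->get_max_sustainable_clocks_by_dc callback.
 */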
int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

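/**
 * amdgpu_dpm_get_uclk_dpm_states - Query the available UCLK DPM states
 * @adev: Pointer to the device.
 * @clock_values_in_khz: Buffer that receives the UCLK levels, in kHz.
 * @num_states: Receives the number of reported states.
 *
 * Return: -EOPNOTSUPP if the callback is not implemented, otherwise the value
 * returned by the powerplay ->get_uclk_dpm_states callback.
 */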
enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

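/**
 * amdgpu_dpm_get_dpm_clock_table - Retrieve the DPM clock table
 * @adev: Pointer to the device.
 * @clock_table: Buffer that receives the DPM clock table.
 *
 * Return: -EOPNOTSUPP if the callback is not implemented, otherwise the value
 * returned by the powerplay ->get_dpm_clock_table callback.
 */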
int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

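/*
 * Illustrative sketch (not taken from an in-tree caller): because
 * amdgpu_dpm_get_xcp_metrics() reports the metrics size when the table
 * argument is NULL, a caller can size its buffer first and then fetch
 * the data:
 *
 *	ssize_t size = amdgpu_dpm_get_xcp_metrics(adev, xcp_id, NULL);
 *	void *buf = (size > 0) ? kzalloc(size, GFP_KERNEL) : NULL;
 *
 *	if (buf) {
 *		size = amdgpu_dpm_get_xcp_metrics(adev, xcp_id, buf);
 *		...
 *		kfree(buf);
 *	}
 */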
/**
 * amdgpu_dpm_get_xcp_metrics - Retrieve metrics for a specific compute
 * partition
 * @adev: Pointer to the device.
 * @xcp_id: Identifier of the XCP for which metrics are to be retrieved.
 * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
 * function returns the size of the metrics structure.
 *
 * This function retrieves metrics for a specific XCP, including details such as
 * VCN/JPEG activity, clock frequencies, and other performance metrics. If the
 * table parameter is NULL, the function returns the size of the metrics
 * structure without populating it.
 *
 * Return: Size of the metrics structure on success, or a negative error code on failure.
 */
ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
				   void *table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_xcp_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_xcp_metrics(adev->powerplay.pp_handle, xcp_id,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
