/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

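/*
 * Note (descriptive, not from the original source): the legacy
 * (non-powerplay) DPM paths register the amdgpu_device itself as the
 * powerplay handle, so comparing the two pointers identifies legacy DPM.
 */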
#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))

int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
				       uint32_t block_type,
				       bool gate,
				       int inst)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
	bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
			(!is_vcn || adev->vcn.num_vcn_inst == 1)) {
		dev_dbg(adev->dev, "IP block %d already in the target %s state!",
				block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
	case AMD_IP_BLOCK_TYPE_VPE:
	case AMD_IP_BLOCK_TYPE_ISP:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate, 0));
		break;
	case AMD_IP_BLOCK_TYPE_VCN:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate, inst));
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}
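
/*
 * Illustrative usage (hypothetical caller, not part of this file): gate
 * the first VCN instance through the SMU, following the same pattern the
 * amdgpu_dpm_enable_*() helpers below use.
 *
 *	if (amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN,
 *					      true, 0))
 *		dev_warn(adev->dev, "VCN power gating failed\n");
 */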

int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_gfx_power_up_by_imu(smu);
	mutex_unlock(&adev->pm.mutex);

	msleep(10);

	return ret;
}

int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (mp1_state == PP_MP1_STATE_FLR) {
		/* VF lost access to SMU */
		if (amdgpu_sriov_vf(adev))
			adev->pm.dpm_enabled = false;
	} else if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->notify_rlc_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->notify_rlc_state(
				adev->powerplay.pp_handle,
				en);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return 0;
	/* Don't use baco for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices.  Needs more investigation.
	 */
	if (adev->in_s3)
		return 0;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}
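
/*
 * Illustrative reset flow (hypothetical caller, not part of this file):
 * check BACO capability before attempting a BACO-based reset.
 *
 *	if (amdgpu_dpm_is_baco_supported(adev)) {
 *		int r = amdgpu_dpm_baco_reset(adev);
 *
 *		if (r)
 *			dev_err(adev->dev, "BACO reset failed (%d)\n", r);
 *	}
 */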

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_link_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_link_reset = smu_link_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_link_reset;
}

int amdgpu_dpm_link_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_link_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
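
/*
 * Illustrative usage (hypothetical caller): request the compute power
 * profile while compute work is active and drop the request afterwards.
 * PP_SMC_POWER_PROFILE_COMPUTE is assumed to be supported by the ASIC.
 *
 *	amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_COMPUTE, true);
 *	... submit compute work ...
 *	amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_COMPUTE, false);
 */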

int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
				   bool pause)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->pause_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->pause_power_profile(
			adev->powerplay.pp_handle, pause);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
				      enum pp_pm_policy p_type, char *buf)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_get_pm_policy_info(smu, p_type, buf);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
			     int policy_level)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_set_pm_policy(smu, policy_type, policy_level);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
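
/*
 * Illustrative sensor query (hypothetical caller): read the current GPU
 * temperature. The sensor id comes from enum amd_pp_sensors; *size is
 * both an input (buffer size) and output (bytes written).
 *
 *	uint32_t temp, size = sizeof(temp);
 *
 *	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
 *				    (void *)&temp, &size))
 *		dev_info(adev->dev, "GPU temperature: %u\n", temp);
 */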

int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, !enable, inst);
	if (ret)
		DRM_ERROR("Dpm %s vcn failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware ||
	    (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU)))
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_rma_reason(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

/**
 * amdgpu_dpm_reset_sdma_is_supported - Check if SDMA reset is supported
 * @adev: amdgpu_device pointer
 *
 * This function checks if the SMU supports resetting the SDMA engine.
 * It returns false if the hardware does not support software SMU or
 * if the feature is not supported.
 */
bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool ret;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	ret = smu_reset_sdma_is_supported(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_reset_sdma(smu, inst_mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
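
/*
 * Illustrative usage (hypothetical caller): reset the first SDMA
 * instance only when the SMU advertises support; inst_mask is a bitmask
 * of SDMA instances.
 *
 *	if (amdgpu_dpm_reset_sdma_is_supported(adev))
 *		amdgpu_dpm_reset_sdma(adev, BIT(0));
 */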

int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_reset_vcn(smu, inst_mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	guard(mutex)(&adev->pm.mutex);

	return smu_set_soft_freq_range(smu,
				       type,
				       min,
				       max);
}
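
/*
 * Illustrative usage (hypothetical caller): pin SCLK to its supported
 * ceiling by querying the DPM range first. Note that only PP_SCLK is
 * accepted by amdgpu_dpm_get_dpm_freq_range() above.
 *
 *	uint32_t min, max;
 *
 *	if (!amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min, &max))
 *		amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, max, max);
 */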

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_entrycount_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/**
 * amdgpu_dpm_gfx_state_change - Handle gfx power state change set
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}
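
/*
 * Illustrative usage (hypothetical caller): notify the SMU that the GPU
 * is entering D0, e.g. on resume. sGpuChangeState_D0Entry is assumed to
 * match the value documented in the kernel-doc above.
 *
 *	amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
 */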

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (!pp_funcs)
		return AMD_DPM_FORCED_LEVEL_AUTO;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

static void amdgpu_dpm_enter_umd_state(struct amdgpu_device *adev)
{
	/* enter UMD Pstate */
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_PG_STATE_UNGATE);
	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_CG_STATE_UNGATE);
}

static void amdgpu_dpm_exit_umd_state(struct amdgpu_device *adev)
{
	/* exit UMD Pstate */
	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_CG_STATE_GATE);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_PG_STATE_GATE);
}

int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) && (level & profile_mode_mask))
		amdgpu_dpm_enter_umd_state(adev);
	else if ((current_level & profile_mode_mask) &&
		 !(level & profile_mode_mask))
		amdgpu_dpm_exit_umd_state(adev);

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		/* If setting the new level failed, restore the previous UMD state */
		if (!(current_level & profile_mode_mask) &&
		    (level & profile_mode_mask))
			amdgpu_dpm_exit_umd_state(adev);
		else if ((current_level & profile_mode_mask) &&
			 !(level & profile_mode_mask))
			amdgpu_dpm_enter_umd_state(adev);

		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}
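
/*
 * Illustrative usage (hypothetical caller): pin the GPU to its peak
 * profile levels for benchmarking, then return to automatic management.
 *
 *	amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK);
 *	... run workload ...
 *	amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_AUTO);
 */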

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			      enum amd_pp_task task_id,
			      enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf,
				  int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
				  size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pm_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
				       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
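
/*
 * Illustrative usage (hypothetical caller): read the default sustained
 * power limit and restore it. PP_PWR_LIMIT_DEFAULT and
 * PP_PWR_TYPE_SUSTAINED are assumed selectors from the pp enums.
 *
 *	uint32_t limit;
 *
 *	if (!amdgpu_dpm_get_power_limit(adev, &limit,
 *					PP_PWR_LIMIT_DEFAULT,
 *					PP_PWR_TYPE_SUSTAINED))
 *		amdgpu_dpm_set_power_limit(adev, limit);
 */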

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = adev->powerplay.pp_handle;

		return (smu->od_enabled || smu->is_apu);
	} else {
		struct pp_hwmgr *hwmgr;

		/*
		 * DPM on some legacy ASICs doesn't carry an od_enabled member,
		 * as the pp_handle is cast directly from adev.
		 */
		if (amdgpu_dpm_is_legacy_dpm(adev))
			return false;

		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

		return hwmgr->od_enabled;
	}
}

int amdgpu_dpm_is_overdrive_enabled(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = adev->powerplay.pp_handle;

		return smu->od_enabled;
	} else {
		struct pp_hwmgr *hwmgr;

		/*
		 * DPM on some legacy ASICs doesn't carry an od_enabled member,
		 * as the pp_handle is cast directly from adev.
		 */
		if (amdgpu_dpm_is_legacy_dpm(adev))
			return false;

		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

		return hwmgr->od_enabled;
	}
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
					       void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

/**
 * amdgpu_dpm_get_temp_metrics - Retrieve temperature metrics for the device
 * @adev: Pointer to the device.
 * @type: Identifier for the temperature type metrics to be fetched.
 * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
 * function returns the size of the metrics structure.
 *
 * This function retrieves metrics for a specific temperature type. If the
 * table parameter is NULL, the function returns the size of the metrics
 * structure without populating it.
 *
 * Return: Size of the metrics structure on success, or a negative error code on failure.
 */
ssize_t amdgpu_dpm_get_temp_metrics(struct amdgpu_device *adev,
				    enum smu_temp_metric_type type, void *table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret;

	if (!pp_funcs->get_temp_metrics ||
	    !amdgpu_dpm_is_temp_metrics_supported(adev, type))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_temp_metrics(adev->powerplay.pp_handle, type, table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
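
/*
 * Illustrative two-step query (hypothetical caller): fetch the required
 * buffer size first by passing a NULL table, then fetch the data; "type"
 * stands in for a real enum smu_temp_metric_type value.
 *
 *	ssize_t size = amdgpu_dpm_get_temp_metrics(adev, type, NULL);
 *	void *buf = size > 0 ? kzalloc(size, GFP_KERNEL) : NULL;
 *
 *	if (buf && amdgpu_dpm_get_temp_metrics(adev, type, buf) < 0)
 *		dev_warn(adev->dev, "failed to read temp metrics\n");
 */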

/**
 * amdgpu_dpm_is_temp_metrics_supported - Check whether a temperature metrics
 * type is supported
 * @adev: Pointer to the device.
 * @type: Identifier for the temperature type metrics to be queried.
 *
 * This function checks whether the given temperature metrics type is supported.
 *
 * Return: True if the metrics type is supported, false otherwise.
 */
bool amdgpu_dpm_is_temp_metrics_supported(struct amdgpu_device *adev,
					  enum smu_temp_metric_type type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	bool support_temp_metrics = false;

	if (!pp_funcs->temp_metrics_is_supported)
		return support_temp_metrics;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_temp_metrics =
			pp_funcs->temp_metrics_is_supported(adev->powerplay.pp_handle, type);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_temp_metrics;
}

/**
 * amdgpu_dpm_get_xcp_metrics - Retrieve metrics for a specific compute
 * partition
 * @adev: Pointer to the device.
 * @xcp_id: Identifier of the XCP for which metrics are to be retrieved.
 * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
 * function returns the size of the metrics structure.
 *
 * This function retrieves metrics for a specific XCP, including details such as
 * VCN/JPEG activity, clock frequencies, and other performance metrics. If the
 * table parameter is NULL, the function returns the size of the metrics
 * structure without populating it.
 *
 * Return: Size of the metrics structure on success, or a negative error code on failure.
 */
ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
				   void *table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_xcp_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_xcp_metrics(adev->powerplay.pp_handle, xcp_id,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

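/*
 * Illustrative usage (hypothetical caller): size the buffer with a NULL
 * table, then retrieve the partition metrics for XCP 0.
 *
 *	ssize_t size = amdgpu_dpm_get_xcp_metrics(adev, 0, NULL);
 *	void *table = size > 0 ? kzalloc(size, GFP_KERNEL) : NULL;
 *
 *	if (table)
 *		amdgpu_dpm_get_xcp_metrics(adev, 0, table);
 */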