xref: /linux/drivers/gpu/drm/amd/pm/amdgpu_dpm.c (revision de848da12f752170c2ebe114804a985314fd5a6a)
/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))

int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk(adev->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk(adev->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

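/*
 * Illustrative sketch (editor's example, not upstream code): log the
 * lowest and highest sclk/mclk levels. Both helpers return 0 when the
 * backend provides no handler, so a zero value must be tolerated; the
 * unit convention (typically 10 kHz steps) is backend-defined.
 */
static void __maybe_unused example_log_clock_bounds(struct amdgpu_device *adev)
{
	int sclk_low = amdgpu_dpm_get_sclk(adev, true);
	int sclk_high = amdgpu_dpm_get_sclk(adev, false);
	int mclk_low = amdgpu_dpm_get_mclk(adev, true);
	int mclk_high = amdgpu_dpm_get_mclk(adev, false);

	dev_info(adev->dev, "sclk %d..%d, mclk %d..%d\n",
		 sclk_low, sclk_high, mclk_low, mclk_high);
}
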
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block %d already in the target %s state!",
				block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
	case AMD_IP_BLOCK_TYPE_VPE:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = pp_funcs->set_powergating_by_smu(
				adev->powerplay.pp_handle, block_type, gate);
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

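/*
 * Illustrative sketch (editor's example, not upstream code): gate VCN
 * when it goes idle. The cached pwr_state check above makes repeated
 * calls with an unchanged state cheap, so callers need not track the
 * current state themselves.
 */
static int __maybe_unused example_gate_vcn(struct amdgpu_device *adev, bool idle)
{
	/* gate == true powers the block down */
	return amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, idle);
}
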
int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_gfx_power_up_by_imu(smu);
	mutex_unlock(&adev->pm.mutex);

	/* give the IMU-driven GFX power up a moment to settle */
	msleep(10);

	return ret;
}

int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (mp1_state == PP_MP1_STATE_FLR) {
		/* VF lost access to SMU */
		if (amdgpu_sriov_vf(adev))
			adev->pm.dpm_enabled = false;
	} else if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->notify_rlc_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->notify_rlc_state(
				adev->powerplay.pp_handle,
				en);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return 0;
	/* Don't use BACO for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices.  Needs more investigation.
	 */
	if (adev->in_s3)
		return 0;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

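/*
 * Illustrative sketch (editor's example, not upstream code): pick a
 * light-weight reset method, preferring BACO where available.
 * amdgpu_dpm_is_baco_supported() already refuses BACO in S3, and
 * mode1/mode2 availability is asic-dependent.
 */
static int __maybe_unused example_pick_reset(struct amdgpu_device *adev)
{
	if (amdgpu_dpm_is_baco_supported(adev))
		return amdgpu_dpm_baco_reset(adev);

	if (amdgpu_dpm_is_mode1_reset_supported(adev))
		return amdgpu_dpm_mode1_reset(adev);

	return amdgpu_dpm_mode2_reset(adev);
}
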
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

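/*
 * Illustrative sketch (editor's example, not upstream code): request
 * the COMPUTE power profile around a burst of compute work, similar to
 * what the KFD does when compute queues become (in)active. Harmless
 * under SR-IOV, where the wrapper is a no-op returning 0.
 */
static int __maybe_unused example_mark_compute_busy(struct amdgpu_device *adev, bool busy)
{
	return amdgpu_dpm_switch_power_profile(adev,
					       PP_SMC_POWER_PROFILE_COMPUTE,
					       busy);
}
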
int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
				      enum pp_pm_policy p_type, char *buf)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_get_pm_policy_info(smu, p_type, buf);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
			     int policy_level)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_set_pm_policy(smu, policy_type, policy_level);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		adev->pm.ac_power = power_supply_is_system_supplied() > 0;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

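/*
 * Illustrative sketch (editor's example, not upstream code): read the
 * current GPU temperature. AMDGPU_PP_SENSOR_GPU_TEMP is assumed to be
 * implemented by the backend (sensor availability is asic-dependent)
 * and is conventionally reported in millidegrees Celsius.
 */
static int __maybe_unused example_read_gpu_temp(struct amdgpu_device *adev, int *temp)
{
	uint32_t size = sizeof(*temp);

	return amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
				      (void *)temp, &size);
}
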
int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable);
	if (ret)
		DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware ||
	    (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU)))
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}

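/*
 * Illustrative sketch (editor's example, not upstream code): load the
 * SMU firmware and log its version. A zero return with *smu_version
 * left untouched is possible on swsmu APUs, where firmware loading is
 * handled elsewhere.
 */
static int __maybe_unused example_load_and_log_smu_fw(struct amdgpu_device *adev)
{
	uint32_t ver = 0;
	int r = amdgpu_pm_load_smu_firmware(adev, &ver);

	if (!r && ver)
		dev_info(adev->dev, "SMU firmware version 0x%08x\n", ver);

	return r;
}
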
int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_rma_reason(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu,
				      SMU_SCLK,
				      min,
				      max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

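/*
 * Illustrative sketch (editor's example, not upstream code): pin sclk
 * to the top of its supported range. Only PP_SCLK is accepted by these
 * wrappers, and only on swsmu parts; everything else fails with
 * -EINVAL or -EOPNOTSUPP. Frequencies are in the unit the SMU reports.
 */
static int __maybe_unused example_pin_sclk_to_max(struct amdgpu_device *adev)
{
	uint32_t min_freq, max_freq;
	int ret;

	ret = amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min_freq, &max_freq);
	if (ret)
		return ret;

	/* collapse the soft range to a single (highest) level */
	return amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, max_freq, max_freq);
}
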
int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_entrycount_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

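/*
 * Illustrative sketch (editor's example, not upstream code): collect
 * the GFXOFF statistics exposed above. Residency logging has to be
 * armed first via amdgpu_dpm_set_residency_gfxoff(); all of these are
 * swsmu-only and the value units are backend-defined.
 */
static void __maybe_unused example_log_gfxoff_stats(struct amdgpu_device *adev)
{
	u32 residency = 0;
	u64 count = 0;

	if (amdgpu_dpm_get_residency_gfxoff(adev, &residency))
		return;
	if (amdgpu_dpm_get_entrycount_gfxoff(adev, &count))
		return;

	dev_info(adev->dev, "gfxoff residency %u, entry count %llu\n",
		 residency, count);
}
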
uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/**
 * amdgpu_dpm_gfx_state_change - Handle a GFX power state change request
 * @adev: amdgpu_device pointer
 * @state: GFX power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		adev->powerplay.pp_funcs->gfx_state_change_set(
			adev->powerplay.pp_handle, state);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (!pp_funcs)
		return AMD_DPM_FORCED_LEVEL_AUTO;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (!(current_level & profile_mode_mask) &&
	      (level & profile_mode_mask)) {
		/* enter UMD Pstate */
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_UNGATE);
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_UNGATE);
	} else if ((current_level & profile_mode_mask) &&
		    !(level & profile_mode_mask)) {
		/* exit UMD Pstate */
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_GATE);
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_GATE);
	}

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}

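/*
 * Illustrative sketch (editor's example, not upstream code): enter and
 * leave the manual performance level, as the
 * pp_dpm_force_performance_level sysfs handler does. The wrapper above
 * already manages the gfxoff and CG/PG transitions around UMD pstates.
 */
static int __maybe_unused example_set_manual_level(struct amdgpu_device *adev, bool manual)
{
	return amdgpu_dpm_force_performance_level(adev,
			manual ? AMD_DPM_FORCED_LEVEL_MANUAL :
				 AMD_DPM_FORCED_LEVEL_AUTO);
}
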
int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 char *buf,
				 int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

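/*
 * Illustrative sketch (editor's example, not upstream code): dump the
 * power profile table the way the pp_power_profile_mode sysfs node
 * does. The backend formats the whole table into buf and returns the
 * number of bytes written, or a negative errno.
 */
static int __maybe_unused example_dump_profile_modes(struct amdgpu_device *adev)
{
	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	int n;

	if (!buf)
		return -ENOMEM;

	n = amdgpu_dpm_get_power_profile_mode(adev, buf);
	if (n > 0)
		dev_info(adev->dev, "%.*s", n, buf);

	kfree(buf);
	return n < 0 ? n : 0;
}
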
int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
				  size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pm_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
				       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

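/*
 * Illustrative sketch (editor's example, not upstream code): take the
 * fan out of automatic control and program a ~50% duty cycle. Mode and
 * speed semantics follow the hwmon pwm1_enable/pwm1 conventions
 * (AMD_FAN_CTRL_MANUAL, duty 0..255).
 */
static int __maybe_unused example_fan_half_speed(struct amdgpu_device *adev)
{
	int ret;

	ret = amdgpu_dpm_set_fan_control_mode(adev, AMD_FAN_CTRL_MANUAL);
	if (ret)
		return ret;

	return amdgpu_dpm_set_fan_speed_pwm(adev, 128);
}
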
int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

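/*
 * Illustrative sketch (editor's example, not upstream code): raise the
 * sustained power cap to the board's maximum supported limit, in the
 * wrapper's native unit (watts on current backends).
 */
static int __maybe_unused example_cap_power_to_max(struct amdgpu_device *adev)
{
	uint32_t limit;
	int ret;

	ret = amdgpu_dpm_get_power_limit(adev, &limit,
					 PP_PWR_LIMIT_MAX,
					 PP_PWR_TYPE_SUSTAINED);
	if (ret)
		return ret;

	return amdgpu_dpm_set_power_limit(adev, limit);
}
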
int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = adev->powerplay.pp_handle;

		return (smu->od_enabled || smu->is_apu);
	} else {
		struct pp_hwmgr *hwmgr;

		/*
		 * dpm on some legacy asics doesn't carry an od_enabled
		 * member, as its pp_handle is cast directly from adev.
		 */
		if (amdgpu_dpm_is_legacy_dpm(adev))
			return false;

		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

		return hwmgr->od_enabled;
	}
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
1887