/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))

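/**
 * amdgpu_dpm_get_sclk - query the graphics (sclk) clock
 * @adev: amdgpu_device pointer
 * @low: select the lowest (true) or highest (false) supported level
 *
 * Return: the clock reported by the power backend, or 0 if the backend
 * does not implement the query.
 */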
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

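/**
 * amdgpu_dpm_set_powergating_by_smu - gate or ungate an IP block via the SMU
 * @adev: amdgpu_device pointer
 * @block_type: AMD_IP_BLOCK_TYPE_* identifier of the IP block
 * @gate: true to power gate the block, false to ungate it
 *
 * The cached power state is checked first so that requests for the
 * current state are a no-op; on success the cache is updated.
 *
 * Return: 0 on success or if the block was already in the requested
 * state, negative error code on failure.
 */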
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block %d already in the target %s state!",
				block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
	case AMD_IP_BLOCK_TYPE_VPE:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_gfx_power_up_by_imu(smu);
	mutex_unlock(&adev->pm.mutex);

	msleep(10);

	return ret;
}

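/**
 * amdgpu_dpm_baco_enter - enter the BACO (Bus Active, Chip Off) state
 * @adev: amdgpu_device pointer
 *
 * Return: 0 on success, -ENOENT if the backend does not implement BACO
 * state control, negative error code on failure.
 */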
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

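/**
 * amdgpu_dpm_is_baco_supported - check whether BACO is usable
 * @adev: amdgpu_device pointer
 *
 * BACO is reported as unsupported while suspending to S3; see the
 * workaround comment in the body for the rationale.
 *
 * Return: true if BACO is supported and currently usable, false otherwise.
 */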
bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	bool ret;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return false;
	/* Don't use BACO for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices.  Needs more investigation.
	 */
	if (adev->in_s3)
		return false;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

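/**
 * amdgpu_dpm_baco_reset - reset the ASIC by cycling through BACO
 * @adev: amdgpu_device pointer
 *
 * Enters and then immediately exits the BACO state to perform a full
 * ASIC reset.
 *
 * Return: 0 on success, -ENOENT if BACO is not supported, negative
 * error code on failure.
 */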
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

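/**
 * amdgpu_dpm_switch_power_profile - enable or disable a workload power profile
 * @adev: amdgpu_device pointer
 * @type: the PP_SMC_POWER_PROFILE profile to switch
 * @en: true to enable the profile, false to disable it
 *
 * Silently does nothing under SR-IOV virtualization.
 *
 * Return: 0 on success or if unsupported, negative error code on failure.
 */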
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
								pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

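/**
 * amdgpu_dpm_get_xgmi_plpd_mode - query the XGMI per-link power down mode
 * @adev: amdgpu_device pointer
 * @mode_desc: optional output for a human-readable name of the mode
 *
 * Return: the current XGMI_PLPD_* mode, or XGMI_PLPD_NONE when the SW SMU
 * path is not used.
 */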
int amdgpu_dpm_get_xgmi_plpd_mode(struct amdgpu_device *adev, char **mode_desc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int mode = XGMI_PLPD_NONE;

	if (is_support_sw_smu(adev)) {
		mode = smu->plpd_mode;
		if (mode_desc == NULL)
			return mode;
		switch (smu->plpd_mode) {
		case XGMI_PLPD_DISALLOW:
			*mode_desc = "disallow";
			break;
		case XGMI_PLPD_DEFAULT:
			*mode_desc = "default";
			break;
		case XGMI_PLPD_OPTIMIZED:
			*mode_desc = "optimized";
			break;
		case XGMI_PLPD_NONE:
		default:
			*mode_desc = "none";
			break;
		}
	}

	return mode;
}

int amdgpu_dpm_set_xgmi_plpd_mode(struct amdgpu_device *adev, int mode)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_set_xgmi_plpd_mode(smu, mode);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

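/**
 * amdgpu_dpm_read_sensor - sample a power/thermal sensor
 * @adev: amdgpu_device pointer
 * @sensor: which sensor to read (enum amd_pp_sensors)
 * @data: buffer receiving the sensor value
 * @size: in: size of @data in bytes; out: number of bytes written
 *
 * Return: 0 on success, -EINVAL on invalid arguments or if the backend
 * does not implement the query.
 */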
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

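/**
 * amdgpu_dpm_compute_clocks - re-evaluate clock state for the current load
 * @adev: amdgpu_device pointer
 *
 * Updates the display bandwidth requirements, waits for the rings to go
 * idle and then asks the power backend to recompute its clocks. Does
 * nothing if dpm is disabled or the backend has no pm_compute_clocks hook.
 */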
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("DPM %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("DPM %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("DPM %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

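/**
 * amdgpu_pm_load_smu_firmware - load the SMU firmware
 * @adev: amdgpu_device pointer
 * @smu_version: optional output for the loaded SMU firmware version
 *
 * Return: 0 on success or if the backend does not implement firmware
 * loading, negative error code on failure.
 */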
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware)
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu,
				      SMU_SCLK,
				      min,
				      max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_entrycount_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/**
 * amdgpu_dpm_gfx_state_change - notify the SMU of a gfx power state change
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 - sGpuChangeState_D0Entry, 2 - sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (!pp_funcs)
		return AMD_DPM_FORCED_LEVEL_AUTO;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

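/**
 * amdgpu_dpm_force_performance_level - force a dpm performance level
 * @adev: amdgpu_device pointer
 * @level: the amd_dpm_forced_level to apply
 *
 * Entering a profiling (UMD pstate) level ungates GFX clock- and
 * powergating; leaving one gates them again. On Raven (but not Raven2),
 * gfxoff is additionally disabled around manual mode.
 *
 * Return: 0 on success, -EINVAL if thermal throttling is active, the
 * requested transition is invalid or the backend rejects the level.
 */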
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (!(current_level & profile_mode_mask) &&
	      (level & profile_mode_mask)) {
		/* enter UMD Pstate */
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_UNGATE);
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_UNGATE);
	} else if ((current_level & profile_mode_mask) &&
		    !(level & profile_mode_mask)) {
		/* exit UMD Pstate */
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_GATE);
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_GATE);
	}

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			      enum amd_pp_task task_id,
			      enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf,
				  int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf,
					   offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
				  size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pm_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
				       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

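/**
 * amdgpu_dpm_get_power_limit - query a power limit
 * @adev: amdgpu_device pointer
 * @limit: output for the limit value
 * @pp_limit_level: which limit level to query (min/current/default/max)
 * @power_type: the power accounting type the limit applies to
 *
 * Return: 0 on success, -ENODATA if the backend does not implement the
 * query, negative error code on failure.
 */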
int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = adev->powerplay.pp_handle;

		return (smu->od_enabled || smu->is_apu);
	} else {
		struct pp_hwmgr *hwmgr;

		/*
		 * DPM on some legacy ASICs doesn't carry the od_enabled member,
		 * as their pp_handle is cast directly from adev.
		 */
		if (amdgpu_dpm_is_legacy_dpm(adev))
			return false;

		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

		return hwmgr->od_enabled;
	}
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
					       void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}