/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

25 #include "amdgpu.h"
26 #include "amdgpu_atombios.h"
27 #include "amdgpu_i2c.h"
28 #include "amdgpu_dpm.h"
29 #include "atom.h"
30 #include "amd_pcie.h"
31 #include "amdgpu_display.h"
32 #include "hwmgr.h"
33 #include <linux/power_supply.h>
34 #include "amdgpu_smu.h"
35 
#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

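/*
 * On legacy (non-powerplay, non-SMU) DPM paths the pp_handle is cast
 * directly from the amdgpu_device pointer itself, so an identity check
 * is enough to detect them.
 */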
#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))

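/**
 * amdgpu_dpm_get_sclk - query the engine (gfx) clock
 * @adev: amdgpu_device pointer
 * @low: select the low (true) or high (false) clock level
 *
 * Returns the clock value on success, 0 when the backend does not
 * implement the query, or a negative error code on failure.
 */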
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

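/**
 * amdgpu_dpm_set_powergating_by_smu - gate/ungate an IP block via the SMU
 * @adev: amdgpu_device pointer
 * @block_type: AMD_IP_BLOCK_TYPE_* identifier of the block
 * @gate: true to power gate the block, false to ungate it
 * @inst: instance number (currently only honored for VCN, 0 elsewhere)
 *
 * Skips the request when the block is already in the target state, except
 * for multi-instance VCN, where per-instance state must still be forwarded.
 * Returns 0 on success or a negative error code on failure.
 */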
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
				       uint32_t block_type,
				       bool gate,
				       int inst)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
	bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
			(!is_vcn || adev->vcn.num_vcn_inst == 1)) {
		dev_dbg(adev->dev, "IP block %d already in the target %s state!",
				block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
	case AMD_IP_BLOCK_TYPE_VPE:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate, 0));
		break;
	case AMD_IP_BLOCK_TYPE_VCN:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate, inst));
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_gfx_power_up_by_imu(smu);
	mutex_unlock(&adev->pm.mutex);

	msleep(10);

	return ret;
}

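/**
 * amdgpu_dpm_baco_enter - enter the BACO (Bus Active, Chip Off) state
 * @adev: amdgpu_device pointer
 *
 * Returns 0 on success, -ENOENT if the backend does not support BACO,
 * or another negative error code on failure.
 */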
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (mp1_state == PP_MP1_STATE_FLR) {
		/* VF lost access to SMU */
		if (amdgpu_sriov_vf(adev))
			adev->pm.dpm_enabled = false;
	} else if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->notify_rlc_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->notify_rlc_state(
				adev->powerplay.pp_handle,
				en);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

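/**
 * amdgpu_dpm_is_baco_supported - query whether BACO can be used
 * @adev: amdgpu_device pointer
 *
 * Returns a non-zero value if BACO is supported, 0 otherwise.  Always
 * reports 0 while in S3 suspend, where BACO is avoided as a workaround
 * (see the comment below).
 */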
int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return 0;
	/* Don't use baco for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices.  Needs more investigation.
	 */
	if (adev->in_s3)
		return 0;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

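/**
 * amdgpu_dpm_baco_reset - reset the ASIC by cycling through BACO
 * @adev: amdgpu_device pointer
 *
 * Performs a full BACO enter/exit sequence, which resets the chip.
 * Returns 0 on success or a negative error code on failure.
 */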
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

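/**
 * amdgpu_dpm_switch_power_profile - select/deselect a power profile hint
 * @adev: amdgpu_device pointer
 * @type: the PP_SMC_POWER_PROFILE workload hint
 * @en: true to select the profile, false to deselect it
 *
 * A no-op (returns 0) on SR-IOV virtual functions.  Returns 0 on success
 * or a negative error code on failure.
 */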
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
				      enum pp_pm_policy p_type, char *buf)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_get_pm_policy_info(smu, p_type, buf);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
			     int policy_level)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_set_pm_policy(smu, policy_type, policy_level);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

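/**
 * amdgpu_pm_acpi_event_handler - react to an ACPI AC/DC power source change
 * @adev: amdgpu_device pointer
 *
 * Refreshes adev->pm.ac_power from the system power supply state and
 * notifies the power backend (BAPM on legacy parts, an AC/DC message on
 * SW SMU) so it can adapt.
 */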
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

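/**
 * amdgpu_dpm_read_sensor - sample a power/thermal sensor
 * @adev: amdgpu_device pointer
 * @sensor: which amd_pp_sensors value to read
 * @data: buffer that receives the sensor value
 * @size: in: size of @data in bytes; out: number of bytes written
 *
 * Returns 0 on success or a negative error code on failure.
 *
 * A minimal usage sketch (hypothetical caller, for illustration only):
 *
 *	uint32_t temp;
 *	uint32_t size = sizeof(temp);
 *
 *	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
 *				    (void *)&temp, &size))
 *		dev_dbg(adev->dev, "GPU temperature: %u\n", temp);
 */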
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

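/**
 * amdgpu_dpm_compute_clocks - re-evaluate clocks for the current workload
 * @adev: amdgpu_device pointer
 *
 * Updates display bandwidth requirements, waits for the rings to go idle
 * so a state transition is safe, then asks the backend to recompute and
 * apply its clock/power state.
 */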
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, !enable, inst);
	if (ret)
		DRM_ERROR("Dpm %s vcn failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

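/**
 * amdgpu_pm_load_smu_firmware - load the SMU firmware via the backend
 * @adev: amdgpu_device pointer
 * @smu_version: optional output for the loaded SMU firmware version
 *
 * Skipped (returns 0) when the backend has no load_firmware hook or on
 * APUs using the SW SMU path.  Returns 0 on success or a negative error
 * code on failure.
 */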
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware ||
	    (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU)))
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_rma_reason(smu);
	mutex_unlock(&adev->pm.mutex);

	if (adev->cper.enabled)
		if (amdgpu_cper_generate_bp_threshold_record(adev))
			dev_warn(adev->dev, "failed to generate bad page threshold CPER records\n");

	return ret;
}

/**
 * amdgpu_dpm_reset_sdma_is_supported - check if SDMA reset is supported
 * @adev: amdgpu_device pointer
 *
 * Checks whether the SMU supports resetting the SDMA engine.
 * Returns false if the device does not use the SW SMU path or
 * if the feature is not supported.
 */
bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool ret;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	ret = smu_reset_sdma_is_supported(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_reset_sdma(smu, inst_mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu,
				      SMU_SCLK,
				      min,
				      max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_entrycount_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/**
 * amdgpu_dpm_gfx_state_change - handle a gfx power state change request
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (!pp_funcs)
		return AMD_DPM_FORCED_LEVEL_AUTO;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

static void amdgpu_dpm_enter_umd_state(struct amdgpu_device *adev)
{
	/* enter UMD Pstate */
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_PG_STATE_UNGATE);
	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_CG_STATE_UNGATE);
}

static void amdgpu_dpm_exit_umd_state(struct amdgpu_device *adev)
{
	/* exit UMD Pstate */
	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_CG_STATE_GATE);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_PG_STATE_GATE);
}

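/**
 * amdgpu_dpm_force_performance_level - force a DPM performance level
 * @adev: amdgpu_device pointer
 * @level: target amd_dpm_forced_level
 *
 * Rejects changes while thermal throttling is active and handles the
 * entry/exit bookkeeping for the UMD pstates (profile_* levels), which
 * require GFX clock/power gating to be disabled.  Returns 0 on success
 * or a negative error code on failure.
 */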
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) && (level & profile_mode_mask))
		amdgpu_dpm_enter_umd_state(adev);
	else if ((current_level & profile_mode_mask) &&
		 !(level & profile_mode_mask))
		amdgpu_dpm_exit_umd_state(adev);

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		/* If setting the new level failed, restore the previous UMD pstate */
		if (!(current_level & profile_mode_mask) &&
		    (level & profile_mode_mask))
			amdgpu_dpm_exit_umd_state(adev);
		else if ((current_level & profile_mode_mask) &&
			 !(level & profile_mode_mask))
			amdgpu_dpm_enter_umd_state(adev);

		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			      enum amd_pp_task task_id,
			      enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

1192 
amdgpu_dpm_print_clock_levels(struct amdgpu_device * adev,enum pp_clock_type type,char * buf)1193 int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
1194 				  enum pp_clock_type type,
1195 				  char *buf)
1196 {
1197 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1198 	int ret = 0;
1199 
1200 	if (!pp_funcs->print_clock_levels)
1201 		return 0;
1202 
1203 	mutex_lock(&adev->pm.mutex);
1204 	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
1205 					   type,
1206 					   buf);
1207 	mutex_unlock(&adev->pm.mutex);
1208 
1209 	return ret;
1210 }
1211 
amdgpu_dpm_emit_clock_levels(struct amdgpu_device * adev,enum pp_clock_type type,char * buf,int * offset)1212 int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
1213 				  enum pp_clock_type type,
1214 				  char *buf,
1215 				  int *offset)
1216 {
1217 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1218 	int ret = 0;
1219 
1220 	if (!pp_funcs->emit_clock_levels)
1221 		return -ENOENT;
1222 
1223 	mutex_lock(&adev->pm.mutex);
1224 	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
1225 					   type,
1226 					   buf,
1227 					   offset);
1228 	mutex_unlock(&adev->pm.mutex);
1229 
1230 	return ret;
1231 }
1232 
int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
				  size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pm_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
				       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

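/**
 * amdgpu_dpm_get_power_limit - query a power limit from the backend
 * @adev: amdgpu_device pointer
 * @limit: output for the limit value
 * @pp_limit_level: which limit to query (current, default, min, max, ...)
 * @power_type: which power domain the limit applies to
 *
 * Returns 0 on success, -ENODATA if the backend does not expose power
 * limits, or another negative error code on failure.
 */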
int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

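/**
 * amdgpu_dpm_is_overdrive_supported - query overdrive (OD) support
 * @adev: amdgpu_device pointer
 *
 * Returns non-zero if overdrive clock/voltage control is available.
 * Legacy DPM paths always report false; see the comment below.
 */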
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = adev->powerplay.pp_handle;

		return (smu->od_enabled || smu->is_apu);
	} else {
		struct pp_hwmgr *hwmgr;

		/*
		 * DPM on some legacy ASICs doesn't carry an od_enabled member
		 * as their pp_handle is cast directly from adev.
		 */
		if (amdgpu_dpm_is_legacy_dpm(adev))
			return false;

		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

		return hwmgr->od_enabled;
	}
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

amdgpu_dpm_get_clock_by_type(struct amdgpu_device * adev,enum amd_pp_clock_type type,struct amd_pp_clocks * clocks)1692 int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
1693 				 enum amd_pp_clock_type type,
1694 				 struct amd_pp_clocks *clocks)
1695 {
1696 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1697 	int ret = 0;
1698 
1699 	if (!pp_funcs->get_clock_by_type)
1700 		return 0;
1701 
1702 	mutex_lock(&adev->pm.mutex);
1703 	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
1704 					  type,
1705 					  clocks);
1706 	mutex_unlock(&adev->pm.mutex);
1707 
1708 	return ret;
1709 }
1710 
amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device * adev,struct amd_pp_simple_clock_info * clocks)1711 int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
1712 						struct amd_pp_simple_clock_info *clocks)
1713 {
1714 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1715 	int ret = 0;
1716 
1717 	if (!pp_funcs->get_display_mode_validation_clocks)
1718 		return 0;
1719 
1720 	mutex_lock(&adev->pm.mutex);
1721 	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
1722 							   clocks);
1723 	mutex_unlock(&adev->pm.mutex);
1724 
1725 	return ret;
1726 }
1727 
amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device * adev,enum amd_pp_clock_type type,struct pp_clock_levels_with_latency * clocks)1728 int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
1729 					      enum amd_pp_clock_type type,
1730 					      struct pp_clock_levels_with_latency *clocks)
1731 {
1732 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1733 	int ret = 0;
1734 
1735 	if (!pp_funcs->get_clock_by_type_with_latency)
1736 		return 0;
1737 
1738 	mutex_lock(&adev->pm.mutex);
1739 	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
1740 						       type,
1741 						       clocks);
1742 	mutex_unlock(&adev->pm.mutex);
1743 
1744 	return ret;
1745 }
1746 
int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

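/**
 * amdgpu_dpm_set_watermarks_for_clocks_ranges - program display watermarks
 * @adev: amdgpu device pointer
 * @clock_ranges: watermark settings for the supported clock ranges
 *
 * Returns -EOPNOTSUPP when the underlying callback is not implemented.
 */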
int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
					       void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

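/**
 * amdgpu_dpm_display_clock_voltage_request - request a minimum clock level
 * on behalf of the display
 * @adev: amdgpu device pointer
 * @clock: requested clock type and frequency
 */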
int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

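/**
 * amdgpu_dpm_get_current_clocks - query the current clock information
 * @adev: amdgpu device pointer
 * @clocks: buffer that receives the current clock info
 */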
int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

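/**
 * amdgpu_dpm_notify_smu_enable_pwe - ask the SMU to enable PWE
 * @adev: amdgpu device pointer
 *
 * Silently does nothing when the underlying callback is not implemented.
 */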
void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

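/**
 * amdgpu_dpm_set_active_display_count - tell powerplay how many displays
 * are active
 * @adev: amdgpu device pointer
 * @count: number of active displays
 */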
int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

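/**
 * amdgpu_dpm_set_min_deep_sleep_dcefclk - set the minimum DCEFCLK allowed
 * while in deep sleep
 * @adev: amdgpu device pointer
 * @clock: minimum deep-sleep DCEFCLK
 */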
int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

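/**
 * amdgpu_dpm_set_hard_min_dcefclk_by_freq - set a hard minimum for DCEFCLK
 * @adev: amdgpu device pointer
 * @clock: hard minimum DCEFCLK frequency
 */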
void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

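/**
 * amdgpu_dpm_set_hard_min_fclk_by_freq - set a hard minimum for the fabric
 * clock (FCLK)
 * @adev: amdgpu device pointer
 * @clock: hard minimum FCLK frequency
 */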
void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

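/**
 * amdgpu_dpm_display_disable_memory_clock_switch - allow or disallow memory
 * clock switching
 * @adev: amdgpu device pointer
 * @disable_memory_clock_switch: true to disallow memory clock switching
 *
 * Returns 0 when the underlying callback is not implemented.
 */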
int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

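/**
 * amdgpu_dpm_get_max_sustainable_clocks_by_dc - query the maximum sustainable
 * clocks for display use
 * @adev: amdgpu device pointer
 * @max_clocks: buffer that receives the max sustainable clock table
 */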
int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

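/**
 * amdgpu_dpm_get_uclk_dpm_states - query the available UCLK DPM states
 * @adev: amdgpu device pointer
 * @clock_values_in_khz: buffer that receives the per-state clock values, in kHz
 * @num_states: receives the number of states reported
 *
 * Returns 0 on success, a negative error code otherwise.
 */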
int amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
				   unsigned int *clock_values_in_khz,
				   unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

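/**
 * amdgpu_dpm_get_dpm_clock_table - fetch the DPM clock table
 * @adev: amdgpu device pointer
 * @clock_table: buffer that receives the clock table
 *
 * Returns -EOPNOTSUPP when the underlying callback is not implemented.
 */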
int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}