xref: /linux/drivers/gpu/drm/amd/pm/amdgpu_dpm.c (revision 4327db89f5e02458001b9c296a961265b8613395)
1 /*
2  * Copyright 2011 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Alex Deucher
23  */
24 
25 #include "amdgpu.h"
26 #include "amdgpu_atombios.h"
27 #include "amdgpu_i2c.h"
28 #include "amdgpu_dpm.h"
29 #include "atom.h"
30 #include "amd_pcie.h"
31 #include "amdgpu_display.h"
32 #include "hwmgr.h"
33 #include <linux/power_supply.h>
34 #include "amdgpu_smu.h"
35 
36 #define amdgpu_dpm_enable_bapm(adev, e) \
37 		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
38 
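/*
 * Legacy powerplay (e.g. SI) registers adev itself as the pp_handle,
 * so pointer equality is enough to identify it; see the od_enabled
 * comments in amdgpu_dpm_is_overdrive_supported() below.
 */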
39 #define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))
40 
41 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
42 {
43 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
44 	int ret = 0;
45 
46 	if (!pp_funcs->get_sclk)
47 		return 0;
48 
49 	mutex_lock(&adev->pm.mutex);
50 	ret = pp_funcs->get_sclk(adev->powerplay.pp_handle,
51 				 low);
52 	mutex_unlock(&adev->pm.mutex);
53 
54 	return ret;
55 }
56 
57 int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
58 {
59 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
60 	int ret = 0;
61 
62 	if (!pp_funcs->get_mclk)
63 		return 0;
64 
65 	mutex_lock(&adev->pm.mutex);
66 	ret = pp_funcs->get_mclk(adev->powerplay.pp_handle,
67 				 low);
68 	mutex_unlock(&adev->pm.mutex);
69 
70 	return ret;
71 }
72 
73 int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
74 				       uint32_t block_type,
75 				       bool gate,
76 				       int inst)
77 {
78 	int ret = 0;
79 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
80 	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
81 	bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN;
82 
83 	mutex_lock(&adev->pm.mutex);
84 
85 	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
86 			(!is_vcn || adev->vcn.num_vcn_inst == 1)) {
87 		dev_dbg(adev->dev, "IP block %d already in the target %s state!\n",
88 				block_type, gate ? "gate" : "ungate");
89 		goto out_unlock;
90 	}
91 
92 	switch (block_type) {
93 	case AMD_IP_BLOCK_TYPE_UVD:
94 	case AMD_IP_BLOCK_TYPE_VCE:
95 	case AMD_IP_BLOCK_TYPE_GFX:
96 	case AMD_IP_BLOCK_TYPE_SDMA:
97 	case AMD_IP_BLOCK_TYPE_JPEG:
98 	case AMD_IP_BLOCK_TYPE_GMC:
99 	case AMD_IP_BLOCK_TYPE_ACP:
100 	case AMD_IP_BLOCK_TYPE_VPE:
101 	case AMD_IP_BLOCK_TYPE_ISP:
102 		if (pp_funcs && pp_funcs->set_powergating_by_smu)
103 			ret = pp_funcs->set_powergating_by_smu(
104 				adev->powerplay.pp_handle, block_type, gate, 0);
105 		break;
106 	case AMD_IP_BLOCK_TYPE_VCN:
107 		if (pp_funcs && pp_funcs->set_powergating_by_smu)
108 			ret = pp_funcs->set_powergating_by_smu(
109 				adev->powerplay.pp_handle, block_type, gate, inst);
110 		break;
111 	default:
112 		break;
113 	}
114 
115 	if (!ret)
116 		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);
117 
118 out_unlock:
119 	mutex_unlock(&adev->pm.mutex);
120 
121 	return ret;
122 }
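
/*
 * Illustrative sketch (not from this file): gating and ungating one
 * VCN instance through the SMU. The inst argument is honored only for
 * AMD_IP_BLOCK_TYPE_VCN; other blocks pass 0.
 *
 *	ret = amdgpu_dpm_set_powergating_by_smu(adev,
 *						AMD_IP_BLOCK_TYPE_VCN,
 *						true, 0);	(gate: power off)
 *	...
 *	ret = amdgpu_dpm_set_powergating_by_smu(adev,
 *						AMD_IP_BLOCK_TYPE_VCN,
 *						false, 0);	(ungate: power on)
 */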
123 
124 int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
125 {
126 	struct smu_context *smu = adev->powerplay.pp_handle;
127 	int ret = -EOPNOTSUPP;
128 
129 	mutex_lock(&adev->pm.mutex);
130 	ret = smu_set_gfx_power_up_by_imu(smu);
131 	mutex_unlock(&adev->pm.mutex);
132 
133 	msleep(10);
134 
135 	return ret;
136 }
137 
138 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
139 {
140 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
141 	void *pp_handle = adev->powerplay.pp_handle;
142 	int ret = 0;
143 
144 	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
145 		return -ENOENT;
146 
147 	mutex_lock(&adev->pm.mutex);
148 
149 	/* enter BACO state */
150 	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
151 
152 	mutex_unlock(&adev->pm.mutex);
153 
154 	return ret;
155 }
156 
157 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
158 {
159 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
160 	void *pp_handle = adev->powerplay.pp_handle;
161 	int ret = 0;
162 
163 	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
164 		return -ENOENT;
165 
166 	mutex_lock(&adev->pm.mutex);
167 
168 	/* exit BACO state */
169 	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
170 
171 	mutex_unlock(&adev->pm.mutex);
172 
173 	return ret;
174 }
175 
176 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
177 			     enum pp_mp1_state mp1_state)
178 {
179 	int ret = 0;
180 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
181 
182 	if (mp1_state == PP_MP1_STATE_FLR) {
183 		/* VF lost access to SMU */
184 		if (amdgpu_sriov_vf(adev))
185 			adev->pm.dpm_enabled = false;
186 	} else if (pp_funcs && pp_funcs->set_mp1_state) {
187 		mutex_lock(&adev->pm.mutex);
188 
189 		ret = pp_funcs->set_mp1_state(
190 				adev->powerplay.pp_handle,
191 				mp1_state);
192 
193 		mutex_unlock(&adev->pm.mutex);
194 	}
195 
196 	return ret;
197 }
198 
199 int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
200 {
201 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
202 	void *pp_handle = adev->powerplay.pp_handle;
203 	int ret;
204 
205 	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
206 		return 0;
207 	/* Don't use baco for reset in S3.
208 	 * This is a workaround for some platforms
209 	 * where entering BACO during suspend
210 	 * seems to cause reboots or hangs.
211 	 * This might be related to the fact that BACO controls
212 	 * power to the whole GPU including devices like audio and USB.
213 	 * Powering down/up everything may adversely affect these other
214 	 * devices.  Needs more investigation.
215 	 */
216 	if (adev->in_s3)
217 		return 0;
218 
219 	mutex_lock(&adev->pm.mutex);
220 
221 	ret = pp_funcs->get_asic_baco_capability(pp_handle);
222 
223 	mutex_unlock(&adev->pm.mutex);
224 
225 	return ret;
226 }
227 
228 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
229 {
230 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
231 	void *pp_handle = adev->powerplay.pp_handle;
232 	int ret = 0;
233 
234 	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
235 		return -ENOENT;
236 
237 	mutex_lock(&adev->pm.mutex);
238 
239 	ret = pp_funcs->asic_reset_mode_2(pp_handle);
240 
241 	mutex_unlock(&adev->pm.mutex);
242 
243 	return ret;
244 }
245 
246 int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
247 {
248 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
249 	void *pp_handle = adev->powerplay.pp_handle;
250 	int ret = 0;
251 
252 	if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
253 		return -ENOENT;
254 
255 	mutex_lock(&adev->pm.mutex);
256 
257 	ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);
258 
259 	mutex_unlock(&adev->pm.mutex);
260 
261 	return ret;
262 }
263 
264 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
265 {
266 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
267 	void *pp_handle = adev->powerplay.pp_handle;
268 	int ret = 0;
269 
270 	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
271 		return -ENOENT;
272 
273 	mutex_lock(&adev->pm.mutex);
274 
275 	/* enter BACO state */
276 	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
277 	if (ret)
278 		goto out;
279 
280 	/* exit BACO state */
281 	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
282 
283 out:
284 	mutex_unlock(&adev->pm.mutex);
285 	return ret;
286 }
287 
288 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
289 {
290 	struct smu_context *smu = adev->powerplay.pp_handle;
291 	bool support_mode1_reset = false;
292 
293 	if (is_support_sw_smu(adev)) {
294 		mutex_lock(&adev->pm.mutex);
295 		support_mode1_reset = smu_mode1_reset_is_support(smu);
296 		mutex_unlock(&adev->pm.mutex);
297 	}
298 
299 	return support_mode1_reset;
300 }
301 
302 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
303 {
304 	struct smu_context *smu = adev->powerplay.pp_handle;
305 	int ret = -EOPNOTSUPP;
306 
307 	if (is_support_sw_smu(adev)) {
308 		mutex_lock(&adev->pm.mutex);
309 		ret = smu_mode1_reset(smu);
310 		mutex_unlock(&adev->pm.mutex);
311 	}
312 
313 	return ret;
314 }
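
/*
 * Illustrative sketch (not from this file): a reset path that probes
 * for mode1 support and falls back to a full BACO cycle, combining
 * the helpers defined above.
 *
 *	if (amdgpu_dpm_is_mode1_reset_supported(adev))
 *		ret = amdgpu_dpm_mode1_reset(adev);
 *	else if (amdgpu_dpm_is_baco_supported(adev))
 *		ret = amdgpu_dpm_baco_reset(adev);
 */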
315 
316 bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev)
317 {
318 	struct smu_context *smu = adev->powerplay.pp_handle;
319 	bool support_link_reset = false;
320 
321 	if (is_support_sw_smu(adev)) {
322 		mutex_lock(&adev->pm.mutex);
323 		support_link_reset = smu_link_reset_is_support(smu);
324 		mutex_unlock(&adev->pm.mutex);
325 	}
326 
327 	return support_link_reset;
328 }
329 
330 int amdgpu_dpm_link_reset(struct amdgpu_device *adev)
331 {
332 	struct smu_context *smu = adev->powerplay.pp_handle;
333 	int ret = -EOPNOTSUPP;
334 
335 	if (is_support_sw_smu(adev)) {
336 		mutex_lock(&adev->pm.mutex);
337 		ret = smu_link_reset(smu);
338 		mutex_unlock(&adev->pm.mutex);
339 	}
340 
341 	return ret;
342 }
343 
344 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
345 				    enum PP_SMC_POWER_PROFILE type,
346 				    bool en)
347 {
348 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
349 	int ret = 0;
350 
351 	if (amdgpu_sriov_vf(adev))
352 		return 0;
353 
354 	if (pp_funcs && pp_funcs->switch_power_profile) {
355 		mutex_lock(&adev->pm.mutex);
356 		ret = pp_funcs->switch_power_profile(
357 			adev->powerplay.pp_handle, type, en);
358 		mutex_unlock(&adev->pm.mutex);
359 	}
360 
361 	return ret;
362 }
363 
364 int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
365 				   bool pause)
366 {
367 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
368 	int ret = 0;
369 
370 	if (amdgpu_sriov_vf(adev))
371 		return 0;
372 
373 	if (pp_funcs && pp_funcs->pause_power_profile) {
374 		mutex_lock(&adev->pm.mutex);
375 		ret = pp_funcs->pause_power_profile(
376 			adev->powerplay.pp_handle, pause);
377 		mutex_unlock(&adev->pm.mutex);
378 	}
379 
380 	return ret;
381 }
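
/*
 * Illustrative sketch (not from this file; PP_SMC_POWER_PROFILE_COMPUTE
 * is assumed from the shared powerplay headers): bracketing a compute
 * job with a power-profile switch so the profile is active only while
 * the job runs.
 *
 *	amdgpu_dpm_switch_power_profile(adev,
 *					PP_SMC_POWER_PROFILE_COMPUTE, true);
 *	... submit the job and wait for it to retire ...
 *	amdgpu_dpm_switch_power_profile(adev,
 *					PP_SMC_POWER_PROFILE_COMPUTE, false);
 */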
382 
383 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
384 			       uint32_t pstate)
385 {
386 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
387 	int ret = 0;
388 
389 	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
390 		mutex_lock(&adev->pm.mutex);
391 		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
392 								pstate);
393 		mutex_unlock(&adev->pm.mutex);
394 	}
395 
396 	return ret;
397 }
398 
399 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
400 			     uint32_t cstate)
401 {
402 	int ret = 0;
403 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
404 	void *pp_handle = adev->powerplay.pp_handle;
405 
406 	if (pp_funcs && pp_funcs->set_df_cstate) {
407 		mutex_lock(&adev->pm.mutex);
408 		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
409 		mutex_unlock(&adev->pm.mutex);
410 	}
411 
412 	return ret;
413 }
414 
415 ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
416 				      enum pp_pm_policy p_type, char *buf)
417 {
418 	struct smu_context *smu = adev->powerplay.pp_handle;
419 	int ret = -EOPNOTSUPP;
420 
421 	if (is_support_sw_smu(adev)) {
422 		mutex_lock(&adev->pm.mutex);
423 		ret = smu_get_pm_policy_info(smu, p_type, buf);
424 		mutex_unlock(&adev->pm.mutex);
425 	}
426 
427 	return ret;
428 }
429 
430 int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
431 			     int policy_level)
432 {
433 	struct smu_context *smu = adev->powerplay.pp_handle;
434 	int ret = -EOPNOTSUPP;
435 
436 	if (is_support_sw_smu(adev)) {
437 		mutex_lock(&adev->pm.mutex);
438 		ret = smu_set_pm_policy(smu, policy_type, policy_level);
439 		mutex_unlock(&adev->pm.mutex);
440 	}
441 
442 	return ret;
443 }
444 
445 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
446 {
447 	void *pp_handle = adev->powerplay.pp_handle;
448 	const struct amd_pm_funcs *pp_funcs =
449 			adev->powerplay.pp_funcs;
450 	int ret = 0;
451 
452 	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
453 		mutex_lock(&adev->pm.mutex);
454 		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
455 		mutex_unlock(&adev->pm.mutex);
456 	}
457 
458 	return ret;
459 }
460 
461 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
462 				      uint32_t msg_id)
463 {
464 	void *pp_handle = adev->powerplay.pp_handle;
465 	const struct amd_pm_funcs *pp_funcs =
466 			adev->powerplay.pp_funcs;
467 	int ret = 0;
468 
469 	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
470 		mutex_lock(&adev->pm.mutex);
471 		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
472 						       msg_id);
473 		mutex_unlock(&adev->pm.mutex);
474 	}
475 
476 	return ret;
477 }
478 
479 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
480 				  bool acquire)
481 {
482 	void *pp_handle = adev->powerplay.pp_handle;
483 	const struct amd_pm_funcs *pp_funcs =
484 			adev->powerplay.pp_funcs;
485 	int ret = -EOPNOTSUPP;
486 
487 	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
488 		mutex_lock(&adev->pm.mutex);
489 		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
490 						   acquire);
491 		mutex_unlock(&adev->pm.mutex);
492 	}
493 
494 	return ret;
495 }
496 
497 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
498 {
499 	if (adev->pm.dpm_enabled) {
500 		mutex_lock(&adev->pm.mutex);
501 		if (power_supply_is_system_supplied() > 0)
502 			adev->pm.ac_power = true;
503 		else
504 			adev->pm.ac_power = false;
505 
506 		if (adev->powerplay.pp_funcs &&
507 		    adev->powerplay.pp_funcs->enable_bapm)
508 			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
509 
510 		if (is_support_sw_smu(adev))
511 			smu_set_ac_dc(adev->powerplay.pp_handle);
512 
513 		mutex_unlock(&adev->pm.mutex);
514 	}
515 }
516 
517 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
518 			   void *data, uint32_t *size)
519 {
520 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
521 	int ret = -EINVAL;
522 
523 	if (!data || !size)
524 		return -EINVAL;
525 
526 	if (pp_funcs && pp_funcs->read_sensor) {
527 		mutex_lock(&adev->pm.mutex);
528 		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
529 					    sensor,
530 					    data,
531 					    size);
532 		mutex_unlock(&adev->pm.mutex);
533 	}
534 
535 	return ret;
536 }
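
/*
 * Illustrative sketch (not from this file): querying a single sensor.
 * The caller passes the buffer size in and the backend may update it.
 *
 *	uint32_t temp, size = sizeof(temp);
 *
 *	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
 *				    (void *)&temp, &size))
 *		dev_info(adev->dev, "GPU temp: %u\n", temp);
 */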
537 
538 int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
539 {
540 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
541 	int ret = -EOPNOTSUPP;
542 
543 	if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
544 		mutex_lock(&adev->pm.mutex);
545 		ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
546 		mutex_unlock(&adev->pm.mutex);
547 	}
548 
549 	return ret;
550 }
551 
552 int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
553 {
554 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
555 	int ret = -EOPNOTSUPP;
556 
557 	if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
558 		mutex_lock(&adev->pm.mutex);
559 		ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
560 		mutex_unlock(&adev->pm.mutex);
561 	}
562 
563 	return ret;
564 }
565 
566 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
567 {
568 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
569 	int i;
570 
571 	if (!adev->pm.dpm_enabled)
572 		return;
573 
574 	if (!pp_funcs->pm_compute_clocks)
575 		return;
576 
577 	if (adev->mode_info.num_crtc)
578 		amdgpu_display_bandwidth_update(adev);
579 
580 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
581 		struct amdgpu_ring *ring = adev->rings[i];
582 		if (ring && ring->sched.ready)
583 			amdgpu_fence_wait_empty(ring);
584 	}
585 
586 	mutex_lock(&adev->pm.mutex);
587 	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
588 	mutex_unlock(&adev->pm.mutex);
589 }
590 
591 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
592 {
593 	int ret = 0;
594 
595 	if (adev->family == AMDGPU_FAMILY_SI) {
596 		mutex_lock(&adev->pm.mutex);
597 		if (enable) {
598 			adev->pm.dpm.uvd_active = true;
599 			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
600 		} else {
601 			adev->pm.dpm.uvd_active = false;
602 		}
603 		mutex_unlock(&adev->pm.mutex);
604 
605 		amdgpu_dpm_compute_clocks(adev);
606 		return;
607 	}
608 
609 	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable, 0);
610 	if (ret)
611 		DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
612 			  enable ? "enable" : "disable", ret);
613 }
614 
615 void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst)
616 {
617 	int ret = 0;
618 
619 	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, !enable, inst);
620 	if (ret)
621 		DRM_ERROR("Dpm %s vcn failed, ret = %d.\n",
622 			  enable ? "enable" : "disable", ret);
623 }
624 
625 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
626 {
627 	int ret = 0;
628 
629 	if (adev->family == AMDGPU_FAMILY_SI) {
630 		mutex_lock(&adev->pm.mutex);
631 		if (enable) {
632 			adev->pm.dpm.vce_active = true;
633 			/* XXX select vce level based on ring/task */
634 			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
635 		} else {
636 			adev->pm.dpm.vce_active = false;
637 		}
638 		mutex_unlock(&adev->pm.mutex);
639 
640 		amdgpu_dpm_compute_clocks(adev);
641 		return;
642 	}
643 
644 	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable, 0);
645 	if (ret)
646 		DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
647 			  enable ? "enable" : "disable", ret);
648 }
649 
650 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
651 {
652 	int ret = 0;
653 
654 	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable, 0);
655 	if (ret)
656 		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
657 			  enable ? "enable" : "disable", ret);
658 }
659 
660 void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
661 {
662 	int ret = 0;
663 
664 	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable, 0);
665 	if (ret)
666 		DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
667 			  enable ? "enable" : "disable", ret);
668 }
669 
670 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
671 {
672 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
673 	int r = 0;
674 
675 	if (!pp_funcs || !pp_funcs->load_firmware ||
676 	    (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU)))
677 		return 0;
678 
679 	mutex_lock(&adev->pm.mutex);
680 	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
681 	if (r) {
682 		pr_err("smu firmware loading failed\n");
683 		goto out;
684 	}
685 
686 	if (smu_version)
687 		*smu_version = adev->pm.fw_version;
688 
689 out:
690 	mutex_unlock(&adev->pm.mutex);
691 	return r;
692 }
693 
694 int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
695 {
696 	int ret = 0;
697 
698 	if (is_support_sw_smu(adev)) {
699 		mutex_lock(&adev->pm.mutex);
700 		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
701 						 enable);
702 		mutex_unlock(&adev->pm.mutex);
703 	}
704 
705 	return ret;
706 }
707 
708 int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
709 {
710 	struct smu_context *smu = adev->powerplay.pp_handle;
711 	int ret = 0;
712 
713 	if (!is_support_sw_smu(adev))
714 		return -EOPNOTSUPP;
715 
716 	mutex_lock(&adev->pm.mutex);
717 	ret = smu_send_hbm_bad_pages_num(smu, size);
718 	mutex_unlock(&adev->pm.mutex);
719 
720 	return ret;
721 }
722 
723 int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
724 {
725 	struct smu_context *smu = adev->powerplay.pp_handle;
726 	int ret = 0;
727 
728 	if (!is_support_sw_smu(adev))
729 		return -EOPNOTSUPP;
730 
731 	mutex_lock(&adev->pm.mutex);
732 	ret = smu_send_hbm_bad_channel_flag(smu, size);
733 	mutex_unlock(&adev->pm.mutex);
734 
735 	return ret;
736 }
737 
738 int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
739 {
740 	struct smu_context *smu = adev->powerplay.pp_handle;
741 	int ret;
742 
743 	if (!is_support_sw_smu(adev))
744 		return -EOPNOTSUPP;
745 
746 	mutex_lock(&adev->pm.mutex);
747 	ret = smu_send_rma_reason(smu);
748 	mutex_unlock(&adev->pm.mutex);
749 
750 	return ret;
751 }
752 
753 /**
754  * amdgpu_dpm_reset_sdma_is_supported - Check if SDMA reset is supported
755  * @adev: amdgpu_device pointer
756  *
757  * Check whether the SMU supports resetting the SDMA engine.
758  * Return: true if supported, false if software SMU is unavailable or
759  * the feature is not supported.
760  */
761 bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev)
762 {
763 	struct smu_context *smu = adev->powerplay.pp_handle;
764 	bool ret;
765 
766 	if (!is_support_sw_smu(adev))
767 		return false;
768 
769 	mutex_lock(&adev->pm.mutex);
770 	ret = smu_reset_sdma_is_supported(smu);
771 	mutex_unlock(&adev->pm.mutex);
772 
773 	return ret;
774 }
775 
776 int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
777 {
778 	struct smu_context *smu = adev->powerplay.pp_handle;
779 	int ret;
780 
781 	if (!is_support_sw_smu(adev))
782 		return -EOPNOTSUPP;
783 
784 	mutex_lock(&adev->pm.mutex);
785 	ret = smu_reset_sdma(smu, inst_mask);
786 	mutex_unlock(&adev->pm.mutex);
787 
788 	return ret;
789 }
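
/*
 * Illustrative sketch (not from this file): resetting SDMA instance 0
 * only when the firmware advertises support; inst_mask is a bitmask
 * of engine instances.
 *
 *	if (amdgpu_dpm_reset_sdma_is_supported(adev))
 *		ret = amdgpu_dpm_reset_sdma(adev, BIT(0));
 */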
790 
791 int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask)
792 {
793 	struct smu_context *smu = adev->powerplay.pp_handle;
794 	int ret;
795 
796 	if (!is_support_sw_smu(adev))
797 		return -EOPNOTSUPP;
798 
799 	mutex_lock(&adev->pm.mutex);
800 	ret = smu_reset_vcn(smu, inst_mask);
801 	mutex_unlock(&adev->pm.mutex);
802 
803 	return ret;
804 }
805 
806 bool amdgpu_dpm_reset_vcn_is_supported(struct amdgpu_device *adev)
807 {
808 	struct smu_context *smu = adev->powerplay.pp_handle;
809 	bool ret;
810 
811 	if (!is_support_sw_smu(adev))
812 		return false;
813 
814 	mutex_lock(&adev->pm.mutex);
815 	ret = smu_reset_vcn_is_supported(smu);
816 	mutex_unlock(&adev->pm.mutex);
817 
818 	return ret;
819 }
820 
821 int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
822 				  enum pp_clock_type type,
823 				  uint32_t *min,
824 				  uint32_t *max)
825 {
826 	int ret = 0;
827 
828 	if (type != PP_SCLK)
829 		return -EINVAL;
830 
831 	if (!is_support_sw_smu(adev))
832 		return -EOPNOTSUPP;
833 
834 	mutex_lock(&adev->pm.mutex);
835 	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
836 				     SMU_SCLK,
837 				     min,
838 				     max);
839 	mutex_unlock(&adev->pm.mutex);
840 
841 	return ret;
842 }
843 
844 int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
845 				   enum pp_clock_type type,
846 				   uint32_t min,
847 				   uint32_t max)
848 {
849 	struct smu_context *smu = adev->powerplay.pp_handle;
850 
851 	if (!is_support_sw_smu(adev))
852 		return -EOPNOTSUPP;
853 
854 	guard(mutex)(&adev->pm.mutex);
855 
856 	return smu_set_soft_freq_range(smu,
857 				      type,
858 				      min,
859 				      max);
860 }
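
/*
 * Illustrative sketch (not from this file): clamping the GFX clock to
 * its full hardware range. Note that only PP_SCLK is accepted by the
 * range query above.
 *
 *	uint32_t min_sclk, max_sclk;
 *
 *	if (!amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK,
 *					   &min_sclk, &max_sclk))
 *		amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK,
 *					       min_sclk, max_sclk);
 */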
861 
862 int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
863 {
864 	struct smu_context *smu = adev->powerplay.pp_handle;
865 	int ret = 0;
866 
867 	if (!is_support_sw_smu(adev))
868 		return 0;
869 
870 	mutex_lock(&adev->pm.mutex);
871 	ret = smu_write_watermarks_table(smu);
872 	mutex_unlock(&adev->pm.mutex);
873 
874 	return ret;
875 }
876 
877 int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
878 			      enum smu_event_type event,
879 			      uint64_t event_arg)
880 {
881 	struct smu_context *smu = adev->powerplay.pp_handle;
882 	int ret = 0;
883 
884 	if (!is_support_sw_smu(adev))
885 		return -EOPNOTSUPP;
886 
887 	mutex_lock(&adev->pm.mutex);
888 	ret = smu_wait_for_event(smu, event, event_arg);
889 	mutex_unlock(&adev->pm.mutex);
890 
891 	return ret;
892 }
893 
894 int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
895 {
896 	struct smu_context *smu = adev->powerplay.pp_handle;
897 	int ret = 0;
898 
899 	if (!is_support_sw_smu(adev))
900 		return -EOPNOTSUPP;
901 
902 	mutex_lock(&adev->pm.mutex);
903 	ret = smu_set_residency_gfxoff(smu, value);
904 	mutex_unlock(&adev->pm.mutex);
905 
906 	return ret;
907 }
908 
909 int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
910 {
911 	struct smu_context *smu = adev->powerplay.pp_handle;
912 	int ret = 0;
913 
914 	if (!is_support_sw_smu(adev))
915 		return -EOPNOTSUPP;
916 
917 	mutex_lock(&adev->pm.mutex);
918 	ret = smu_get_residency_gfxoff(smu, value);
919 	mutex_unlock(&adev->pm.mutex);
920 
921 	return ret;
922 }
923 
924 int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
925 {
926 	struct smu_context *smu = adev->powerplay.pp_handle;
927 	int ret = 0;
928 
929 	if (!is_support_sw_smu(adev))
930 		return -EOPNOTSUPP;
931 
932 	mutex_lock(&adev->pm.mutex);
933 	ret = smu_get_entrycount_gfxoff(smu, value);
934 	mutex_unlock(&adev->pm.mutex);
935 
936 	return ret;
937 }
938 
939 int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
940 {
941 	struct smu_context *smu = adev->powerplay.pp_handle;
942 	int ret = 0;
943 
944 	if (!is_support_sw_smu(adev))
945 		return -EOPNOTSUPP;
946 
947 	mutex_lock(&adev->pm.mutex);
948 	ret = smu_get_status_gfxoff(smu, value);
949 	mutex_unlock(&adev->pm.mutex);
950 
951 	return ret;
952 }
953 
954 uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
955 {
956 	struct smu_context *smu = adev->powerplay.pp_handle;
957 
958 	if (!is_support_sw_smu(adev))
959 		return 0;
960 
961 	return atomic64_read(&smu->throttle_int_counter);
962 }
963 
964 /**
965  * amdgpu_dpm_gfx_state_change - Handle a gfx power state change request
966  * @adev: amdgpu_device pointer
967  * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
968  */
969 void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
970 				 enum gfx_change_state state)
971 {
972 	mutex_lock(&adev->pm.mutex);
973 	if (adev->powerplay.pp_funcs &&
974 	    adev->powerplay.pp_funcs->gfx_state_change_set)
975 		adev->powerplay.pp_funcs->gfx_state_change_set(
976 			adev->powerplay.pp_handle, state);
977 	mutex_unlock(&adev->pm.mutex);
978 }
979 
980 int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
981 			    void *umc_ecc)
982 {
983 	struct smu_context *smu = adev->powerplay.pp_handle;
984 	int ret = 0;
985 
986 	if (!is_support_sw_smu(adev))
987 		return -EOPNOTSUPP;
988 
989 	mutex_lock(&adev->pm.mutex);
990 	ret = smu_get_ecc_info(smu, umc_ecc);
991 	mutex_unlock(&adev->pm.mutex);
992 
993 	return ret;
994 }
995 
996 struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
997 						     uint32_t idx)
998 {
999 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1000 	struct amd_vce_state *vstate = NULL;
1001 
1002 	if (!pp_funcs->get_vce_clock_state)
1003 		return NULL;
1004 
1005 	mutex_lock(&adev->pm.mutex);
1006 	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
1007 					       idx);
1008 	mutex_unlock(&adev->pm.mutex);
1009 
1010 	return vstate;
1011 }
1012 
1013 void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
1014 					enum amd_pm_state_type *state)
1015 {
1016 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1017 
1018 	mutex_lock(&adev->pm.mutex);
1019 
1020 	if (!pp_funcs->get_current_power_state) {
1021 		*state = adev->pm.dpm.user_state;
1022 		goto out;
1023 	}
1024 
1025 	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
1026 	if (*state < POWER_STATE_TYPE_DEFAULT ||
1027 	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
1028 		*state = adev->pm.dpm.user_state;
1029 
1030 out:
1031 	mutex_unlock(&adev->pm.mutex);
1032 }
1033 
1034 void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
1035 				enum amd_pm_state_type state)
1036 {
1037 	mutex_lock(&adev->pm.mutex);
1038 	adev->pm.dpm.user_state = state;
1039 	mutex_unlock(&adev->pm.mutex);
1040 
1041 	if (is_support_sw_smu(adev))
1042 		return;
1043 
1044 	if (amdgpu_dpm_dispatch_task(adev,
1045 				     AMD_PP_TASK_ENABLE_USER_STATE,
1046 				     &state) == -EOPNOTSUPP)
1047 		amdgpu_dpm_compute_clocks(adev);
1048 }
1049 
1050 enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
1051 {
1052 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1053 	enum amd_dpm_forced_level level;
1054 
1055 	if (!pp_funcs)
1056 		return AMD_DPM_FORCED_LEVEL_AUTO;
1057 
1058 	mutex_lock(&adev->pm.mutex);
1059 	if (pp_funcs->get_performance_level)
1060 		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
1061 	else
1062 		level = adev->pm.dpm.forced_level;
1063 	mutex_unlock(&adev->pm.mutex);
1064 
1065 	return level;
1066 }
1067 
1068 static void amdgpu_dpm_enter_umd_state(struct amdgpu_device *adev)
1069 {
1070 	/* enter UMD Pstate */
1071 	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
1072 					       AMD_PG_STATE_UNGATE);
1073 	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
1074 					       AMD_CG_STATE_UNGATE);
1075 }
1076 
1077 static void amdgpu_dpm_exit_umd_state(struct amdgpu_device *adev)
1078 {
1079 	/* exit UMD Pstate */
1080 	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
1081 					       AMD_CG_STATE_GATE);
1082 	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
1083 					       AMD_PG_STATE_GATE);
1084 }
1085 
1086 int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
1087 				       enum amd_dpm_forced_level level)
1088 {
1089 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1090 	enum amd_dpm_forced_level current_level;
1091 	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1092 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1093 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1094 					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1095 
1096 	if (!pp_funcs || !pp_funcs->force_performance_level)
1097 		return 0;
1098 
1099 	if (adev->pm.dpm.thermal_active)
1100 		return -EINVAL;
1101 
1102 	current_level = amdgpu_dpm_get_performance_level(adev);
1103 	if (current_level == level)
1104 		return 0;
1105 
1106 	if (!(current_level & profile_mode_mask) &&
1107 	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
1108 		return -EINVAL;
1109 
1110 	if (adev->asic_type == CHIP_RAVEN) {
1111 		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
1112 			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
1113 			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
1114 				amdgpu_gfx_off_ctrl(adev, false);
1115 			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
1116 				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
1117 				amdgpu_gfx_off_ctrl(adev, true);
1118 		}
1119 	}
1120 
1121 	if (!(current_level & profile_mode_mask) && (level & profile_mode_mask))
1122 		amdgpu_dpm_enter_umd_state(adev);
1123 	else if ((current_level & profile_mode_mask) &&
1124 		 !(level & profile_mode_mask))
1125 		amdgpu_dpm_exit_umd_state(adev);
1126 
1127 	mutex_lock(&adev->pm.mutex);
1128 
1129 	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
1130 					      level)) {
1131 		mutex_unlock(&adev->pm.mutex);
1132 		/* If setting the new level failed, restore the previous UMD pstate */
1133 		if (!(current_level & profile_mode_mask) &&
1134 		    (level & profile_mode_mask))
1135 			amdgpu_dpm_exit_umd_state(adev);
1136 		else if ((current_level & profile_mode_mask) &&
1137 			 !(level & profile_mode_mask))
1138 			amdgpu_dpm_enter_umd_state(adev);
1139 
1140 		return -EINVAL;
1141 	}
1142 
1143 	adev->pm.dpm.forced_level = level;
1144 
1145 	mutex_unlock(&adev->pm.mutex);
1146 
1147 	return 0;
1148 }
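
/*
 * Illustrative sketch (not from this file): entering a profiling level
 * and returning to automatic management. Profile levels ungate GFX
 * clock/power gating (UMD pstate) on entry and re-gate it on exit, as
 * handled above.
 *
 *	amdgpu_dpm_force_performance_level(adev,
 *					   AMD_DPM_FORCED_LEVEL_PROFILE_PEAK);
 *	... run the profiling workload ...
 *	amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_AUTO);
 */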
1149 
1150 int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
1151 				 struct pp_states_info *states)
1152 {
1153 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1154 	int ret = 0;
1155 
1156 	if (!pp_funcs->get_pp_num_states)
1157 		return -EOPNOTSUPP;
1158 
1159 	mutex_lock(&adev->pm.mutex);
1160 	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
1161 					  states);
1162 	mutex_unlock(&adev->pm.mutex);
1163 
1164 	return ret;
1165 }
1166 
1167 int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
1168 			      enum amd_pp_task task_id,
1169 			      enum amd_pm_state_type *user_state)
1170 {
1171 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1172 	int ret = 0;
1173 
1174 	if (!pp_funcs->dispatch_tasks)
1175 		return -EOPNOTSUPP;
1176 
1177 	mutex_lock(&adev->pm.mutex);
1178 	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
1179 				       task_id,
1180 				       user_state);
1181 	mutex_unlock(&adev->pm.mutex);
1182 
1183 	return ret;
1184 }
1185 
1186 int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
1187 {
1188 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1189 	int ret = 0;
1190 
1191 	if (!table)
1192 		return -EINVAL;
1193 
1194 	if (amdgpu_sriov_vf(adev) || !pp_funcs->get_pp_table || adev->scpm_enabled)
1195 		return -EOPNOTSUPP;
1196 
1197 	mutex_lock(&adev->pm.mutex);
1198 	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
1199 				     table);
1200 	mutex_unlock(&adev->pm.mutex);
1201 
1202 	return ret;
1203 }
1204 
1205 int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
1206 				      uint32_t type,
1207 				      long *input,
1208 				      uint32_t size)
1209 {
1210 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1211 	int ret = 0;
1212 
1213 	if (!pp_funcs->set_fine_grain_clk_vol)
1214 		return 0;
1215 
1216 	mutex_lock(&adev->pm.mutex);
1217 	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
1218 					       type,
1219 					       input,
1220 					       size);
1221 	mutex_unlock(&adev->pm.mutex);
1222 
1223 	return ret;
1224 }
1225 
1226 int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
1227 				  uint32_t type,
1228 				  long *input,
1229 				  uint32_t size)
1230 {
1231 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1232 	int ret = 0;
1233 
1234 	if (!pp_funcs->odn_edit_dpm_table)
1235 		return 0;
1236 
1237 	mutex_lock(&adev->pm.mutex);
1238 	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
1239 					   type,
1240 					   input,
1241 					   size);
1242 	mutex_unlock(&adev->pm.mutex);
1243 
1244 	return ret;
1245 }
1246 
1247 int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
1248 				  enum pp_clock_type type,
1249 				  char *buf)
1250 {
1251 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1252 	int ret = 0;
1253 
1254 	if (!pp_funcs->print_clock_levels)
1255 		return 0;
1256 
1257 	mutex_lock(&adev->pm.mutex);
1258 	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
1259 					   type,
1260 					   buf);
1261 	mutex_unlock(&adev->pm.mutex);
1262 
1263 	return ret;
1264 }
1265 
1266 int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
1267 				  enum pp_clock_type type,
1268 				  char *buf,
1269 				  int *offset)
1270 {
1271 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1272 	int ret = 0;
1273 
1274 	if (!pp_funcs->emit_clock_levels)
1275 		return -ENOENT;
1276 
1277 	mutex_lock(&adev->pm.mutex);
1278 	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
1279 					   type,
1280 					   buf,
1281 					   offset);
1282 	mutex_unlock(&adev->pm.mutex);
1283 
1284 	return ret;
1285 }
1286 
1287 int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
1288 				    uint64_t ppfeature_masks)
1289 {
1290 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1291 	int ret = 0;
1292 
1293 	if (!pp_funcs->set_ppfeature_status)
1294 		return 0;
1295 
1296 	mutex_lock(&adev->pm.mutex);
1297 	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
1298 					     ppfeature_masks);
1299 	mutex_unlock(&adev->pm.mutex);
1300 
1301 	return ret;
1302 }
1303 
1304 int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
1305 {
1306 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1307 	int ret = 0;
1308 
1309 	if (!pp_funcs->get_ppfeature_status)
1310 		return 0;
1311 
1312 	mutex_lock(&adev->pm.mutex);
1313 	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
1314 					     buf);
1315 	mutex_unlock(&adev->pm.mutex);
1316 
1317 	return ret;
1318 }
1319 
1320 int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
1321 				 enum pp_clock_type type,
1322 				 uint32_t mask)
1323 {
1324 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1325 	int ret = 0;
1326 
1327 	if (!pp_funcs->force_clock_level)
1328 		return 0;
1329 
1330 	mutex_lock(&adev->pm.mutex);
1331 	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
1332 					  type,
1333 					  mask);
1334 	mutex_unlock(&adev->pm.mutex);
1335 
1336 	return ret;
1337 }
1338 
1339 int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
1340 {
1341 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1342 	int ret = 0;
1343 
1344 	if (!pp_funcs->get_sclk_od)
1345 		return -EOPNOTSUPP;
1346 
1347 	mutex_lock(&adev->pm.mutex);
1348 	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
1349 	mutex_unlock(&adev->pm.mutex);
1350 
1351 	return ret;
1352 }
1353 
1354 int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
1355 {
1356 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1357 
1358 	if (is_support_sw_smu(adev))
1359 		return -EOPNOTSUPP;
1360 
1361 	mutex_lock(&adev->pm.mutex);
1362 	if (pp_funcs->set_sclk_od)
1363 		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
1364 	mutex_unlock(&adev->pm.mutex);
1365 
1366 	if (amdgpu_dpm_dispatch_task(adev,
1367 				     AMD_PP_TASK_READJUST_POWER_STATE,
1368 				     NULL) == -EOPNOTSUPP) {
1369 		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1370 		amdgpu_dpm_compute_clocks(adev);
1371 	}
1372 
1373 	return 0;
1374 }
1375 
1376 int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
1377 {
1378 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1379 	int ret = 0;
1380 
1381 	if (!pp_funcs->get_mclk_od)
1382 		return -EOPNOTSUPP;
1383 
1384 	mutex_lock(&adev->pm.mutex);
1385 	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
1386 	mutex_unlock(&adev->pm.mutex);
1387 
1388 	return ret;
1389 }
1390 
1391 int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
1392 {
1393 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1394 
1395 	if (is_support_sw_smu(adev))
1396 		return -EOPNOTSUPP;
1397 
1398 	mutex_lock(&adev->pm.mutex);
1399 	if (pp_funcs->set_mclk_od)
1400 		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
1401 	mutex_unlock(&adev->pm.mutex);
1402 
1403 	if (amdgpu_dpm_dispatch_task(adev,
1404 				     AMD_PP_TASK_READJUST_POWER_STATE,
1405 				     NULL) == -EOPNOTSUPP) {
1406 		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1407 		amdgpu_dpm_compute_clocks(adev);
1408 	}
1409 
1410 	return 0;
1411 }
1412 
1413 int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
1414 				      char *buf)
1415 {
1416 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1417 	int ret = 0;
1418 
1419 	if (!pp_funcs->get_power_profile_mode)
1420 		return -EOPNOTSUPP;
1421 
1422 	mutex_lock(&adev->pm.mutex);
1423 	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
1424 					       buf);
1425 	mutex_unlock(&adev->pm.mutex);
1426 
1427 	return ret;
1428 }
1429 
1430 int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
1431 				      long *input, uint32_t size)
1432 {
1433 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1434 	int ret = 0;
1435 
1436 	if (!pp_funcs->set_power_profile_mode)
1437 		return 0;
1438 
1439 	mutex_lock(&adev->pm.mutex);
1440 	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
1441 					       input,
1442 					       size);
1443 	mutex_unlock(&adev->pm.mutex);
1444 
1445 	return ret;
1446 }
1447 
1448 int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
1449 {
1450 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1451 	int ret = 0;
1452 
1453 	if (!pp_funcs->get_gpu_metrics)
1454 		return 0;
1455 
1456 	mutex_lock(&adev->pm.mutex);
1457 	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
1458 					table);
1459 	mutex_unlock(&adev->pm.mutex);
1460 
1461 	return ret;
1462 }
1463 
1464 ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
1465 				  size_t size)
1466 {
1467 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1468 	int ret = 0;
1469 
1470 	if (!pp_funcs->get_pm_metrics)
1471 		return -EOPNOTSUPP;
1472 
1473 	mutex_lock(&adev->pm.mutex);
1474 	ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
1475 				       size);
1476 	mutex_unlock(&adev->pm.mutex);
1477 
1478 	return ret;
1479 }
1480 
1481 int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
1482 				    uint32_t *fan_mode)
1483 {
1484 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1485 	int ret = 0;
1486 
1487 	if (!pp_funcs->get_fan_control_mode)
1488 		return -EOPNOTSUPP;
1489 
1490 	mutex_lock(&adev->pm.mutex);
1491 	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
1492 					     fan_mode);
1493 	mutex_unlock(&adev->pm.mutex);
1494 
1495 	return ret;
1496 }
1497 
1498 int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
1499 				 uint32_t speed)
1500 {
1501 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1502 	int ret = 0;
1503 
1504 	if (!pp_funcs->set_fan_speed_pwm)
1505 		return -EOPNOTSUPP;
1506 
1507 	mutex_lock(&adev->pm.mutex);
1508 	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
1509 					  speed);
1510 	mutex_unlock(&adev->pm.mutex);
1511 
1512 	return ret;
1513 }
1514 
1515 int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
1516 				 uint32_t *speed)
1517 {
1518 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1519 	int ret = 0;
1520 
1521 	if (!pp_funcs->get_fan_speed_pwm)
1522 		return -EOPNOTSUPP;
1523 
1524 	mutex_lock(&adev->pm.mutex);
1525 	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
1526 					  speed);
1527 	mutex_unlock(&adev->pm.mutex);
1528 
1529 	return ret;
1530 }
1531 
1532 int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
1533 				 uint32_t *speed)
1534 {
1535 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1536 	int ret = 0;
1537 
1538 	if (!pp_funcs->get_fan_speed_rpm)
1539 		return -EOPNOTSUPP;
1540 
1541 	mutex_lock(&adev->pm.mutex);
1542 	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
1543 					  speed);
1544 	mutex_unlock(&adev->pm.mutex);
1545 
1546 	return ret;
1547 }
1548 
1549 int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
1550 				 uint32_t speed)
1551 {
1552 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1553 	int ret = 0;
1554 
1555 	if (!pp_funcs->set_fan_speed_rpm)
1556 		return -EOPNOTSUPP;
1557 
1558 	mutex_lock(&adev->pm.mutex);
1559 	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
1560 					  speed);
1561 	mutex_unlock(&adev->pm.mutex);
1562 
1563 	return ret;
1564 }
1565 
1566 int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
1567 				    uint32_t mode)
1568 {
1569 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1570 	int ret = 0;
1571 
1572 	if (!pp_funcs->set_fan_control_mode)
1573 		return -EOPNOTSUPP;
1574 
1575 	mutex_lock(&adev->pm.mutex);
1576 	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
1577 					     mode);
1578 	mutex_unlock(&adev->pm.mutex);
1579 
1580 	return ret;
1581 }
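
/*
 * Illustrative sketch (not from this file; AMD_FAN_CTRL_MANUAL is
 * assumed from the hwmon interface): taking manual fan control before
 * programming a fixed PWM duty cycle (0-255).
 *
 *	amdgpu_dpm_set_fan_control_mode(adev, AMD_FAN_CTRL_MANUAL);
 *	amdgpu_dpm_set_fan_speed_pwm(adev, 128);	(roughly 50% duty)
 */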
1582 
1583 int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
1584 			       uint32_t *limit,
1585 			       enum pp_power_limit_level pp_limit_level,
1586 			       enum pp_power_type power_type)
1587 {
1588 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1589 	int ret = 0;
1590 
1591 	if (!pp_funcs->get_power_limit)
1592 		return -ENODATA;
1593 
1594 	mutex_lock(&adev->pm.mutex);
1595 	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
1596 					limit,
1597 					pp_limit_level,
1598 					power_type);
1599 	mutex_unlock(&adev->pm.mutex);
1600 
1601 	return ret;
1602 }
1603 
1604 int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
1605 			       uint32_t limit_type,
1606 			       uint32_t limit)
1607 {
1608 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1609 	int ret = 0;
1610 
1611 	if (!pp_funcs->set_power_limit)
1612 		return -EINVAL;
1613 
1614 	mutex_lock(&adev->pm.mutex);
1615 	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
1616 					limit_type, limit);
1617 	mutex_unlock(&adev->pm.mutex);
1618 
1619 	return ret;
1620 }
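
/*
 * Illustrative sketch (not from this file; the enum values are assumed
 * from kgd_pp_interface.h): reading the current sustained power limit.
 *
 *	uint32_t limit;
 *
 *	if (!amdgpu_dpm_get_power_limit(adev, &limit,
 *					PP_PWR_LIMIT_CURRENT,
 *					PP_PWR_TYPE_SUSTAINED))
 *		dev_info(adev->dev, "power limit: %u W\n", limit);
 */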
1621 
1622 int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
1623 {
1624 	bool cclk_dpm_supported = false;
1625 
1626 	if (!is_support_sw_smu(adev))
1627 		return false;
1628 
1629 	mutex_lock(&adev->pm.mutex);
1630 	cclk_dpm_supported = is_support_cclk_dpm(adev);
1631 	mutex_unlock(&adev->pm.mutex);
1632 
1633 	return (int)cclk_dpm_supported;
1634 }
1635 
1636 int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
1637 						       struct seq_file *m)
1638 {
1639 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1640 
1641 	if (!pp_funcs->debugfs_print_current_performance_level)
1642 		return -EOPNOTSUPP;
1643 
1644 	mutex_lock(&adev->pm.mutex);
1645 	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
1646 							  m);
1647 	mutex_unlock(&adev->pm.mutex);
1648 
1649 	return 0;
1650 }
1651 
1652 int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
1653 				       void **addr,
1654 				       size_t *size)
1655 {
1656 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1657 	int ret = 0;
1658 
1659 	if (!pp_funcs->get_smu_prv_buf_details)
1660 		return -ENOSYS;
1661 
1662 	mutex_lock(&adev->pm.mutex);
1663 	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
1664 						addr,
1665 						size);
1666 	mutex_unlock(&adev->pm.mutex);
1667 
1668 	return ret;
1669 }
1670 
1671 int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
1672 {
1673 	if (is_support_sw_smu(adev)) {
1674 		struct smu_context *smu = adev->powerplay.pp_handle;
1675 
1676 		return (smu->od_enabled || smu->is_apu);
1677 	} else {
1678 		struct pp_hwmgr *hwmgr;
1679 
1680 		/*
1681 		 * DPM on some legacy ASICs doesn't carry an od_enabled member,
1682 		 * as its pp_handle is cast directly from adev.
1683 		 */
1684 		if (amdgpu_dpm_is_legacy_dpm(adev))
1685 			return false;
1686 
1687 		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
1688 
1689 		return hwmgr->od_enabled;
1690 	}
1691 }
1692 
1693 int amdgpu_dpm_is_overdrive_enabled(struct amdgpu_device *adev)
1694 {
1695 	if (is_support_sw_smu(adev)) {
1696 		struct smu_context *smu = adev->powerplay.pp_handle;
1697 
1698 		return smu->od_enabled;
1699 	} else {
1700 		struct pp_hwmgr *hwmgr;
1701 
1702 		/*
1703 		 * DPM on some legacy ASICs doesn't carry an od_enabled member,
1704 		 * as its pp_handle is cast directly from adev.
1705 		 */
1706 		if (amdgpu_dpm_is_legacy_dpm(adev))
1707 			return false;
1708 
1709 		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
1710 
1711 		return hwmgr->od_enabled;
1712 	}
1713 }
1714 
1715 int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
1716 			    const char *buf,
1717 			    size_t size)
1718 {
1719 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1720 	int ret = 0;
1721 
1722 	if (!buf || !size)
1723 		return -EINVAL;
1724 
1725 	if (amdgpu_sriov_vf(adev) || !pp_funcs->set_pp_table || adev->scpm_enabled)
1726 		return -EOPNOTSUPP;
1727 
1728 	mutex_lock(&adev->pm.mutex);
1729 	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
1730 				     buf,
1731 				     size);
1732 	mutex_unlock(&adev->pm.mutex);
1733 
1734 	return ret;
1735 }
1736 
1737 int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
1738 {
1739 	struct smu_context *smu = adev->powerplay.pp_handle;
1740 
1741 	if (!is_support_sw_smu(adev))
1742 		return INT_MAX;
1743 
1744 	return smu->cpu_core_num;
1745 }
1746 
1747 void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
1748 {
1749 	if (!is_support_sw_smu(adev))
1750 		return;
1751 
1752 	amdgpu_smu_stb_debug_fs_init(adev);
1753 }
1754 
1755 int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
1756 					    const struct amd_pp_display_configuration *input)
1757 {
1758 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1759 	int ret = 0;
1760 
1761 	if (!pp_funcs->display_configuration_change)
1762 		return 0;
1763 
1764 	mutex_lock(&adev->pm.mutex);
1765 	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
1766 						     input);
1767 	mutex_unlock(&adev->pm.mutex);
1768 
1769 	return ret;
1770 }
1771 
amdgpu_dpm_get_clock_by_type(struct amdgpu_device * adev,enum amd_pp_clock_type type,struct amd_pp_clocks * clocks)1772 int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
1773 				 enum amd_pp_clock_type type,
1774 				 struct amd_pp_clocks *clocks)
1775 {
1776 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1777 	int ret = 0;
1778 
1779 	if (!pp_funcs->get_clock_by_type)
1780 		return 0;
1781 
1782 	mutex_lock(&adev->pm.mutex);
1783 	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
1784 					  type,
1785 					  clocks);
1786 	mutex_unlock(&adev->pm.mutex);
1787 
1788 	return ret;
1789 }
1790 
amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device * adev,struct amd_pp_simple_clock_info * clocks)1791 int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
1792 						struct amd_pp_simple_clock_info *clocks)
1793 {
1794 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1795 	int ret = 0;
1796 
1797 	if (!pp_funcs->get_display_mode_validation_clocks)
1798 		return 0;
1799 
1800 	mutex_lock(&adev->pm.mutex);
1801 	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
1802 							   clocks);
1803 	mutex_unlock(&adev->pm.mutex);
1804 
1805 	return ret;
1806 }
1807 
amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device * adev,enum amd_pp_clock_type type,struct pp_clock_levels_with_latency * clocks)1808 int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
1809 					      enum amd_pp_clock_type type,
1810 					      struct pp_clock_levels_with_latency *clocks)
1811 {
1812 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1813 	int ret = 0;
1814 
1815 	if (!pp_funcs->get_clock_by_type_with_latency)
1816 		return 0;
1817 
1818 	mutex_lock(&adev->pm.mutex);
1819 	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
1820 						       type,
1821 						       clocks);
1822 	mutex_unlock(&adev->pm.mutex);
1823 
1824 	return ret;
1825 }
1826 
amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device * adev,enum amd_pp_clock_type type,struct pp_clock_levels_with_voltage * clocks)1827 int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
1828 					      enum amd_pp_clock_type type,
1829 					      struct pp_clock_levels_with_voltage *clocks)
1830 {
1831 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1832 	int ret = 0;
1833 
1834 	if (!pp_funcs->get_clock_by_type_with_voltage)
1835 		return 0;
1836 
1837 	mutex_lock(&adev->pm.mutex);
1838 	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
1839 						       type,
1840 						       clocks);
1841 	mutex_unlock(&adev->pm.mutex);
1842 
1843 	return ret;
1844 }
1845 
amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device * adev,void * clock_ranges)1846 int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
1847 					       void *clock_ranges)
1848 {
1849 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1850 	int ret = 0;
1851 
1852 	if (!pp_funcs->set_watermarks_for_clocks_ranges)
1853 		return -EOPNOTSUPP;
1854 
1855 	mutex_lock(&adev->pm.mutex);
1856 	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
1857 							 clock_ranges);
1858 	mutex_unlock(&adev->pm.mutex);
1859 
1860 	return ret;
1861 }
1862 
amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device * adev,struct pp_display_clock_request * clock)1863 int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
1864 					     struct pp_display_clock_request *clock)
1865 {
1866 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1867 	int ret = 0;
1868 
1869 	if (!pp_funcs->display_clock_voltage_request)
1870 		return -EOPNOTSUPP;
1871 
1872 	mutex_lock(&adev->pm.mutex);
1873 	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
1874 						      clock);
1875 	mutex_unlock(&adev->pm.mutex);
1876 
1877 	return ret;
1878 }
1879 
amdgpu_dpm_get_current_clocks(struct amdgpu_device * adev,struct amd_pp_clock_info * clocks)1880 int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
1881 				  struct amd_pp_clock_info *clocks)
1882 {
1883 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1884 	int ret = 0;
1885 
1886 	if (!pp_funcs->get_current_clocks)
1887 		return -EOPNOTSUPP;
1888 
1889 	mutex_lock(&adev->pm.mutex);
1890 	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
1891 					   clocks);
1892 	mutex_unlock(&adev->pm.mutex);
1893 
1894 	return ret;
1895 }
1896 
amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device * adev)1897 void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
1898 {
1899 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1900 
1901 	if (!pp_funcs->notify_smu_enable_pwe)
1902 		return;
1903 
1904 	mutex_lock(&adev->pm.mutex);
1905 	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
1906 	mutex_unlock(&adev->pm.mutex);
1907 }
1908 
amdgpu_dpm_set_active_display_count(struct amdgpu_device * adev,uint32_t count)1909 int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
1910 					uint32_t count)
1911 {
1912 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1913 	int ret = 0;
1914 
1915 	if (!pp_funcs->set_active_display_count)
1916 		return -EOPNOTSUPP;
1917 
1918 	mutex_lock(&adev->pm.mutex);
1919 	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
1920 						 count);
1921 	mutex_unlock(&adev->pm.mutex);
1922 
1923 	return ret;
1924 }
1925 
amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device * adev,uint32_t clock)1926 int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
1927 					  uint32_t clock)
1928 {
1929 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1930 	int ret = 0;
1931 
1932 	if (!pp_funcs->set_min_deep_sleep_dcefclk)
1933 		return -EOPNOTSUPP;
1934 
1935 	mutex_lock(&adev->pm.mutex);
1936 	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
1937 						   clock);
1938 	mutex_unlock(&adev->pm.mutex);
1939 
1940 	return ret;
1941 }
1942 
amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device * adev,uint32_t clock)1943 void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
1944 					     uint32_t clock)
1945 {
1946 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1947 
1948 	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
1949 		return;
1950 
1951 	mutex_lock(&adev->pm.mutex);
1952 	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
1953 					       clock);
1954 	mutex_unlock(&adev->pm.mutex);
1955 }
1956 
amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device * adev,uint32_t clock)1957 void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
1958 					  uint32_t clock)
1959 {
1960 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1961 
1962 	if (!pp_funcs->set_hard_min_fclk_by_freq)
1963 		return;
1964 
1965 	mutex_lock(&adev->pm.mutex);
1966 	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
1967 					    clock);
1968 	mutex_unlock(&adev->pm.mutex);
1969 }
1970 
amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device * adev,bool disable_memory_clock_switch)1971 int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
1972 						   bool disable_memory_clock_switch)
1973 {
1974 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1975 	int ret = 0;
1976 
1977 	if (!pp_funcs->display_disable_memory_clock_switch)
1978 		return 0;
1979 
1980 	mutex_lock(&adev->pm.mutex);
1981 	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
1982 							    disable_memory_clock_switch);
1983 	mutex_unlock(&adev->pm.mutex);
1984 
1985 	return ret;
1986 }
1987 
amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device * adev,struct pp_smu_nv_clock_table * max_clocks)1988 int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
1989 						struct pp_smu_nv_clock_table *max_clocks)
1990 {
1991 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1992 	int ret = 0;
1993 
1994 	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
1995 		return -EOPNOTSUPP;
1996 
1997 	mutex_lock(&adev->pm.mutex);
1998 	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
1999 							 max_clocks);
2000 	mutex_unlock(&adev->pm.mutex);
2001 
2002 	return ret;
2003 }
2004 
amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device * adev,unsigned int * clock_values_in_khz,unsigned int * num_states)2005 enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
2006 						  unsigned int *clock_values_in_khz,
2007 						  unsigned int *num_states)
2008 {
2009 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
2010 	int ret = 0;
2011 
2012 	if (!pp_funcs->get_uclk_dpm_states)
2013 		return -EOPNOTSUPP;
2014 
2015 	mutex_lock(&adev->pm.mutex);
2016 	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
2017 					    clock_values_in_khz,
2018 					    num_states);
2019 	mutex_unlock(&adev->pm.mutex);
2020 
2021 	return ret;
2022 }
2023 
amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device * adev,struct dpm_clocks * clock_table)2024 int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
2025 				   struct dpm_clocks *clock_table)
2026 {
2027 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
2028 	int ret = 0;
2029 
2030 	if (!pp_funcs->get_dpm_clock_table)
2031 		return -EOPNOTSUPP;
2032 
2033 	mutex_lock(&adev->pm.mutex);
2034 	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
2035 					    clock_table);
2036 	mutex_unlock(&adev->pm.mutex);
2037 
2038 	return ret;
2039 }
2040 
/**
 * amdgpu_dpm_get_temp_metrics - Retrieve metrics for a specific temperature
 * type
 * @adev: Pointer to the device.
 * @type: Identifier for the temperature type metrics to be fetched.
 * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
 * function returns the size of the metrics structure.
 *
 * This function retrieves metrics for a specific temperature type. If the
 * table parameter is NULL, the function returns the size of the metrics
 * structure without populating it.
 *
 * Return: Size of the metrics structure on success, or a negative error code
 * on failure.
 */
ssize_t amdgpu_dpm_get_temp_metrics(struct amdgpu_device *adev,
				    enum smu_temp_metric_type type, void *table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret;

	if (!pp_funcs->get_temp_metrics ||
	    !amdgpu_dpm_is_temp_metrics_supported(adev, type))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_temp_metrics(adev->powerplay.pp_handle, type, table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
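
/*
 * Illustrative usage sketch (not part of the driver): the query-size,
 * allocate, then fetch pattern the kernel-doc above describes.
 * example_read_temp_metrics() is a hypothetical caller; parsing of the
 * returned table is left out.
 */
#if 0	/* example only, never compiled */
static int example_read_temp_metrics(struct amdgpu_device *adev,
				     enum smu_temp_metric_type type)
{
	ssize_t size;
	void *table;

	/* A NULL table returns the required buffer size. */
	size = amdgpu_dpm_get_temp_metrics(adev, type, NULL);
	if (size < 0)
		return size;

	table = kvzalloc(size, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	size = amdgpu_dpm_get_temp_metrics(adev, type, table);
	/* ... parse the metrics here ... */
	kvfree(table);

	return size < 0 ? size : 0;
}
#endif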

/**
 * amdgpu_dpm_is_temp_metrics_supported - Check whether a specific temperature
 * metrics type is supported
 * @adev: Pointer to the device.
 * @type: Identifier for the temperature type metrics to be checked.
 *
 * This function checks whether the given temperature metrics type is
 * supported on this device.
 *
 * Return: True if the metrics type is supported, false otherwise.
 */
bool amdgpu_dpm_is_temp_metrics_supported(struct amdgpu_device *adev,
					  enum smu_temp_metric_type type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	bool support_temp_metrics = false;

	if (!pp_funcs->temp_metrics_is_supported)
		return support_temp_metrics;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_temp_metrics =
			pp_funcs->temp_metrics_is_supported(adev->powerplay.pp_handle, type);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_temp_metrics;
}

/**
 * amdgpu_dpm_get_xcp_metrics - Retrieve metrics for a specific compute
 * partition
 * @adev: Pointer to the device.
 * @xcp_id: Identifier of the XCP for which metrics are to be retrieved.
 * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
 * function returns the size of the metrics structure.
 *
 * This function retrieves metrics for a specific XCP, including details such as
 * VCN/JPEG activity, clock frequencies, and other performance metrics. If the
 * table parameter is NULL, the function returns the size of the metrics
 * structure without populating it.
 *
 * Return: Size of the metrics structure on success, or a negative error code
 * on failure.
 */
ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
				   void *table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_xcp_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_xcp_metrics(adev->powerplay.pp_handle, xcp_id,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
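
/*
 * Illustrative usage sketch (not part of the driver): query the size of
 * the per-partition metrics table, allocate it, then fetch the data.
 * example_read_xcp_metrics() is a hypothetical caller.  Note the wrapper
 * above returns 0 when the callback is absent, which the sketch treats
 * as "not supported".
 */
#if 0	/* example only, never compiled */
static int example_read_xcp_metrics(struct amdgpu_device *adev, int xcp_id)
{
	ssize_t size;
	void *table;

	/* A NULL table yields the size of the metrics structure. */
	size = amdgpu_dpm_get_xcp_metrics(adev, xcp_id, NULL);
	if (size <= 0)
		return size ? size : -EOPNOTSUPP;

	table = kvzalloc(size, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	size = amdgpu_dpm_get_xcp_metrics(adev, xcp_id, table);
	/* ... consume VCN/JPEG activity, clocks, etc. here ... */
	kvfree(table);

	return size < 0 ? size : 0;
}
#endif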

const struct ras_smu_drv *amdgpu_dpm_get_ras_smu_driver(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;

	return smu_get_ras_smu_driver(pp_handle);
}