xref: /linux/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c (revision 55d0969c451159cff86949b38c39171cab962069)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "pp_debug.h"
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/gfp.h>
27 #include <linux/slab.h>
28 #include <linux/firmware.h>
29 #include <linux/reboot.h>
30 #include "amd_shared.h"
31 #include "amd_powerplay.h"
32 #include "power_state.h"
33 #include "amdgpu.h"
34 #include "hwmgr.h"
35 #include "amdgpu_dpm_internal.h"
36 #include "amdgpu_display.h"
37 
38 static const struct amd_pm_funcs pp_dpm_funcs;
39 
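/*
 * Allocate the hwmgr instance and wire it into the amdgpu device:
 * cache the chip family/id, the pp_feature mask and the display
 * configuration, then expose the powerplay implementation to the
 * amdgpu core through pp_dpm_funcs.
 */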
40 static int amd_powerplay_create(struct amdgpu_device *adev)
41 {
42 	struct pp_hwmgr *hwmgr;
43 
44 	if (adev == NULL)
45 		return -EINVAL;
46 
47 	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
48 	if (hwmgr == NULL)
49 		return -ENOMEM;
50 
51 	hwmgr->adev = adev;
52 	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
53 	hwmgr->device = amdgpu_cgs_create_device(adev);
54 	mutex_init(&hwmgr->msg_lock);
55 	hwmgr->chip_family = adev->family;
56 	hwmgr->chip_id = adev->asic_type;
57 	hwmgr->feature_mask = adev->pm.pp_feature;
58 	hwmgr->display_config = &adev->pm.pm_display_cfg;
59 	adev->powerplay.pp_handle = hwmgr;
60 	adev->powerplay.pp_funcs = &pp_dpm_funcs;
61 	return 0;
62 }
63 
64 
65 static void amd_powerplay_destroy(struct amdgpu_device *adev)
66 {
67 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
68 
69 	mutex_destroy(&hwmgr->msg_lock);
70 
71 	kfree(hwmgr->hardcode_pp_table);
72 	hwmgr->hardcode_pp_table = NULL;
73 
74 	kfree(hwmgr);
75 	hwmgr = NULL;
76 }
77 
78 static int pp_early_init(void *handle)
79 {
80 	int ret;
81 	struct amdgpu_device *adev = handle;
82 
83 	ret = amd_powerplay_create(adev);
84 
85 	if (ret != 0)
86 		return ret;
87 
88 	ret = hwmgr_early_init(adev->powerplay.pp_handle);
89 	if (ret)
90 		return -EINVAL;
91 
92 	return 0;
93 }
94 
95 static void pp_swctf_delayed_work_handler(struct work_struct *work)
96 {
97 	struct pp_hwmgr *hwmgr =
98 		container_of(work, struct pp_hwmgr, swctf_delayed_work.work);
99 	struct amdgpu_device *adev = hwmgr->adev;
100 	struct amdgpu_dpm_thermal *range =
101 				&adev->pm.dpm.thermal;
102 	uint32_t gpu_temperature, size = sizeof(gpu_temperature);
103 	int ret;
104 
 105 	/*
 106 	 * If the hotspot/edge temperature is confirmed to be below the SW CTF
 107 	 * trip point after the enforced delay, nothing needs to be done.
 108 	 * Otherwise, a graceful shutdown is performed to prevent further damage.
 109 	 */
110 	if (range->sw_ctf_threshold &&
111 	    hwmgr->hwmgr_func->read_sensor) {
112 		ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
113 						     AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
114 						     &gpu_temperature,
115 						     &size);
 116 		/*
 117 		 * For some legacy ASICs, retrieving the hotspot temperature might
 118 		 * not be supported. Fall back to checking the edge temperature.
 119 		 */
120 		if (ret == -EOPNOTSUPP)
121 			ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
122 							     AMDGPU_PP_SENSOR_EDGE_TEMP,
123 							     &gpu_temperature,
124 							     &size);
125 		if (!ret && gpu_temperature / 1000 < range->sw_ctf_threshold)
126 			return;
127 	}
128 
 129 	dev_emerg(adev->dev, "ERROR: GPU over temperature range (SW CTF) detected!\n");
130 	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
131 	orderly_poweroff(true);
132 }
133 
134 static int pp_sw_init(void *handle)
135 {
136 	struct amdgpu_device *adev = handle;
137 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
138 	int ret = 0;
139 
140 	ret = hwmgr_sw_init(hwmgr);
141 
 142 	pr_debug("powerplay sw init %s\n", ret ? "failed" : "succeeded");
143 
144 	if (!ret)
145 		INIT_DELAYED_WORK(&hwmgr->swctf_delayed_work,
146 				  pp_swctf_delayed_work_handler);
147 
148 	return ret;
149 }
150 
151 static int pp_sw_fini(void *handle)
152 {
153 	struct amdgpu_device *adev = handle;
154 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
155 
156 	hwmgr_sw_fini(hwmgr);
157 
158 	amdgpu_ucode_release(&adev->pm.fw);
159 
160 	return 0;
161 }
162 
163 static int pp_hw_init(void *handle)
164 {
165 	int ret = 0;
166 	struct amdgpu_device *adev = handle;
167 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
168 
169 	ret = hwmgr_hw_init(hwmgr);
170 
171 	if (ret)
172 		pr_err("powerplay hw init failed\n");
173 
174 	return ret;
175 }
176 
177 static int pp_hw_fini(void *handle)
178 {
179 	struct amdgpu_device *adev = handle;
180 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
181 
182 	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
183 
184 	hwmgr_hw_fini(hwmgr);
185 
186 	return 0;
187 }
188 
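/*
 * Reserve the SMU private buffer in GTT and hand both its CPU and GPU
 * addresses to the SMU through the notify_cac_buffer_info callback.
 * If the SMU cannot be notified, the buffer is freed again.
 */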
189 static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
190 {
191 	int r = -EINVAL;
192 	void *cpu_ptr = NULL;
193 	uint64_t gpu_addr;
194 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
195 
196 	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
197 						PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
198 						&adev->pm.smu_prv_buffer,
199 						&gpu_addr,
200 						&cpu_ptr)) {
201 		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
202 		return;
203 	}
204 
205 	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
206 		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
207 					lower_32_bits((unsigned long)cpu_ptr),
208 					upper_32_bits((unsigned long)cpu_ptr),
209 					lower_32_bits(gpu_addr),
210 					upper_32_bits(gpu_addr),
211 					adev->pm.smu_prv_buffer_size);
212 
213 	if (r) {
214 		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
215 		adev->pm.smu_prv_buffer = NULL;
216 		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
217 	}
218 }
219 
220 static int pp_late_init(void *handle)
221 {
222 	struct amdgpu_device *adev = handle;
223 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
224 
225 	if (hwmgr && hwmgr->pm_en)
226 		hwmgr_handle_task(hwmgr,
227 					AMD_PP_TASK_COMPLETE_INIT, NULL);
228 	if (adev->pm.smu_prv_buffer_size != 0)
229 		pp_reserve_vram_for_smu(adev);
230 
231 	return 0;
232 }
233 
234 static void pp_late_fini(void *handle)
235 {
236 	struct amdgpu_device *adev = handle;
237 
238 	if (adev->pm.smu_prv_buffer)
239 		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
240 	amd_powerplay_destroy(adev);
241 }
242 
243 
244 static bool pp_is_idle(void *handle)
245 {
246 	return false;
247 }
248 
249 static int pp_wait_for_idle(void *handle)
250 {
251 	return 0;
252 }
253 
254 static int pp_sw_reset(void *handle)
255 {
256 	return 0;
257 }
258 
259 static int pp_set_powergating_state(void *handle,
260 				    enum amd_powergating_state state)
261 {
262 	return 0;
263 }
264 
265 static int pp_suspend(void *handle)
266 {
267 	struct amdgpu_device *adev = handle;
268 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
269 
270 	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
271 
272 	return hwmgr_suspend(hwmgr);
273 }
274 
275 static int pp_resume(void *handle)
276 {
277 	struct amdgpu_device *adev = handle;
278 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
279 
280 	return hwmgr_resume(hwmgr);
281 }
282 
283 static int pp_set_clockgating_state(void *handle,
284 					  enum amd_clockgating_state state)
285 {
286 	return 0;
287 }
288 
289 static const struct amd_ip_funcs pp_ip_funcs = {
290 	.name = "powerplay",
291 	.early_init = pp_early_init,
292 	.late_init = pp_late_init,
293 	.sw_init = pp_sw_init,
294 	.sw_fini = pp_sw_fini,
295 	.hw_init = pp_hw_init,
296 	.hw_fini = pp_hw_fini,
297 	.late_fini = pp_late_fini,
298 	.suspend = pp_suspend,
299 	.resume = pp_resume,
300 	.is_idle = pp_is_idle,
301 	.wait_for_idle = pp_wait_for_idle,
302 	.soft_reset = pp_sw_reset,
303 	.set_clockgating_state = pp_set_clockgating_state,
304 	.set_powergating_state = pp_set_powergating_state,
305 	.dump_ip_state = NULL,
306 	.print_ip_state = NULL,
307 };
308 
309 const struct amdgpu_ip_block_version pp_smu_ip_block =
310 {
311 	.type = AMD_IP_BLOCK_TYPE_SMC,
312 	.major = 1,
313 	.minor = 0,
314 	.rev = 0,
315 	.funcs = &pp_ip_funcs,
316 };
317 
 318 /* This interface is only supported on VI (Volcanic Islands), because
 319  * only smu7/8 can help to load the gfx/sdma firmware. The SMU needs to
 320  * be enabled before any other IP's firmware can be loaded, so call
 321  * start_smu here to load the smu7 firmware and then the other IPs' firmware.
 322  */
323 static int pp_dpm_load_fw(void *handle)
324 {
325 	struct pp_hwmgr *hwmgr = handle;
326 
327 	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
328 		return -EINVAL;
329 
330 	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
331 		pr_err("fw load failed\n");
332 		return -EINVAL;
333 	}
334 
335 	return 0;
336 }
337 
338 static int pp_dpm_fw_loading_complete(void *handle)
339 {
340 	return 0;
341 }
342 
343 static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
344 {
345 	struct pp_hwmgr *hwmgr = handle;
346 
347 	if (!hwmgr || !hwmgr->pm_en)
348 		return -EINVAL;
349 
350 	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
351 		pr_info_ratelimited("%s was not implemented.\n", __func__);
352 		return 0;
353 	}
354 
355 	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
356 }
357 
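/*
 * Track entry to and exit from the UMD stable pstates: entering any
 * profiling level saves the current dpm level, so that a later
 * PROFILE_EXIT request can restore it.
 */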
358 static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
359 						enum amd_dpm_forced_level *level)
360 {
361 	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
362 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
363 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
364 					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
365 
366 	if (!(hwmgr->dpm_level & profile_mode_mask)) {
 367 		/* enter umd pstate, save current level, disable gfx cg */
368 		if (*level & profile_mode_mask) {
369 			hwmgr->saved_dpm_level = hwmgr->dpm_level;
370 			hwmgr->en_umd_pstate = true;
371 		}
372 	} else {
 373 		/* exit umd pstate, restore level, enable gfx cg */
374 		if (!(*level & profile_mode_mask)) {
375 			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
376 				*level = hwmgr->saved_dpm_level;
377 			hwmgr->en_umd_pstate = false;
378 		}
379 	}
380 }
381 
382 static int pp_dpm_force_performance_level(void *handle,
383 					enum amd_dpm_forced_level level)
384 {
385 	struct pp_hwmgr *hwmgr = handle;
386 
387 	if (!hwmgr || !hwmgr->pm_en)
388 		return -EINVAL;
389 
390 	if (level == hwmgr->dpm_level)
391 		return 0;
392 
393 	pp_dpm_en_umd_pstate(hwmgr, &level);
394 	hwmgr->request_dpm_level = level;
395 	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
396 
397 	return 0;
398 }
399 
400 static enum amd_dpm_forced_level pp_dpm_get_performance_level(
401 								void *handle)
402 {
403 	struct pp_hwmgr *hwmgr = handle;
404 
405 	if (!hwmgr || !hwmgr->pm_en)
406 		return -EINVAL;
407 
408 	return hwmgr->dpm_level;
409 }
410 
411 static uint32_t pp_dpm_get_sclk(void *handle, bool low)
412 {
413 	struct pp_hwmgr *hwmgr = handle;
414 
415 	if (!hwmgr || !hwmgr->pm_en)
416 		return 0;
417 
418 	if (hwmgr->hwmgr_func->get_sclk == NULL) {
419 		pr_info_ratelimited("%s was not implemented.\n", __func__);
420 		return 0;
421 	}
422 	return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
423 }
424 
425 static uint32_t pp_dpm_get_mclk(void *handle, bool low)
426 {
427 	struct pp_hwmgr *hwmgr = handle;
428 
429 	if (!hwmgr || !hwmgr->pm_en)
430 		return 0;
431 
432 	if (hwmgr->hwmgr_func->get_mclk == NULL) {
433 		pr_info_ratelimited("%s was not implemented.\n", __func__);
434 		return 0;
435 	}
436 	return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
437 }
438 
439 static void pp_dpm_powergate_vce(void *handle, bool gate)
440 {
441 	struct pp_hwmgr *hwmgr = handle;
442 
443 	if (!hwmgr || !hwmgr->pm_en)
444 		return;
445 
446 	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
447 		pr_info_ratelimited("%s was not implemented.\n", __func__);
448 		return;
449 	}
450 	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
451 }
452 
453 static void pp_dpm_powergate_uvd(void *handle, bool gate)
454 {
455 	struct pp_hwmgr *hwmgr = handle;
456 
457 	if (!hwmgr || !hwmgr->pm_en)
458 		return;
459 
460 	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
461 		pr_info_ratelimited("%s was not implemented.\n", __func__);
462 		return;
463 	}
464 	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
465 }
466 
467 static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
468 		enum amd_pm_state_type *user_state)
469 {
470 	struct pp_hwmgr *hwmgr = handle;
471 
472 	if (!hwmgr || !hwmgr->pm_en)
473 		return -EINVAL;
474 
475 	return hwmgr_handle_task(hwmgr, task_id, user_state);
476 }
477 
478 static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
479 {
480 	struct pp_hwmgr *hwmgr = handle;
481 	struct pp_power_state *state;
482 	enum amd_pm_state_type pm_type;
483 
484 	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
485 		return -EINVAL;
486 
487 	state = hwmgr->current_ps;
488 
489 	switch (state->classification.ui_label) {
490 	case PP_StateUILabel_Battery:
491 		pm_type = POWER_STATE_TYPE_BATTERY;
492 		break;
493 	case PP_StateUILabel_Balanced:
494 		pm_type = POWER_STATE_TYPE_BALANCED;
495 		break;
496 	case PP_StateUILabel_Performance:
497 		pm_type = POWER_STATE_TYPE_PERFORMANCE;
498 		break;
499 	default:
500 		if (state->classification.flags & PP_StateClassificationFlag_Boot)
501 			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
502 		else
503 			pm_type = POWER_STATE_TYPE_DEFAULT;
504 		break;
505 	}
506 
507 	return pm_type;
508 }
509 
510 static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
511 {
512 	struct pp_hwmgr *hwmgr = handle;
513 
514 	if (!hwmgr || !hwmgr->pm_en)
515 		return -EOPNOTSUPP;
516 
517 	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
518 		return -EOPNOTSUPP;
519 
520 	if (mode == U32_MAX)
521 		return -EINVAL;
522 
523 	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
524 
525 	return 0;
526 }
527 
528 static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
529 {
530 	struct pp_hwmgr *hwmgr = handle;
531 
532 	if (!hwmgr || !hwmgr->pm_en)
533 		return -EOPNOTSUPP;
534 
535 	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
536 		return -EOPNOTSUPP;
537 
538 	if (!fan_mode)
539 		return -EINVAL;
540 
541 	*fan_mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
542 	return 0;
543 }
544 
545 static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
546 {
547 	struct pp_hwmgr *hwmgr = handle;
548 
549 	if (!hwmgr || !hwmgr->pm_en)
550 		return -EOPNOTSUPP;
551 
552 	if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
553 		return -EOPNOTSUPP;
554 
555 	if (speed == U32_MAX)
556 		return -EINVAL;
557 
558 	return hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
559 }
560 
561 static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
562 {
563 	struct pp_hwmgr *hwmgr = handle;
564 
565 	if (!hwmgr || !hwmgr->pm_en)
566 		return -EOPNOTSUPP;
567 
568 	if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
569 		return -EOPNOTSUPP;
570 
571 	if (!speed)
572 		return -EINVAL;
573 
574 	return hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
575 }
576 
577 static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
578 {
579 	struct pp_hwmgr *hwmgr = handle;
580 
581 	if (!hwmgr || !hwmgr->pm_en)
582 		return -EOPNOTSUPP;
583 
584 	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
585 		return -EOPNOTSUPP;
586 
587 	if (!rpm)
588 		return -EINVAL;
589 
590 	return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
591 }
592 
593 static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
594 {
595 	struct pp_hwmgr *hwmgr = handle;
596 
597 	if (!hwmgr || !hwmgr->pm_en)
598 		return -EOPNOTSUPP;
599 
600 	if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
601 		return -EOPNOTSUPP;
602 
603 	if (rpm == U32_MAX)
604 		return -EINVAL;
605 
606 	return hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
607 }
608 
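/*
 * The power states live in one contiguous allocation with a stride of
 * hwmgr->ps_size bytes per state; map each state's UI label onto the
 * generic POWER_STATE_TYPE_* values expected by the amdgpu core.
 */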
609 static int pp_dpm_get_pp_num_states(void *handle,
610 		struct pp_states_info *data)
611 {
612 	struct pp_hwmgr *hwmgr = handle;
613 	int i;
614 
615 	memset(data, 0, sizeof(*data));
616 
617 	if (!hwmgr || !hwmgr->pm_en || !hwmgr->ps)
618 		return -EINVAL;
619 
620 	data->nums = hwmgr->num_ps;
621 
622 	for (i = 0; i < hwmgr->num_ps; i++) {
623 		struct pp_power_state *state = (struct pp_power_state *)
624 				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
625 		switch (state->classification.ui_label) {
626 		case PP_StateUILabel_Battery:
627 			data->states[i] = POWER_STATE_TYPE_BATTERY;
628 			break;
629 		case PP_StateUILabel_Balanced:
630 			data->states[i] = POWER_STATE_TYPE_BALANCED;
631 			break;
632 		case PP_StateUILabel_Performance:
633 			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
634 			break;
635 		default:
636 			if (state->classification.flags & PP_StateClassificationFlag_Boot)
637 				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
638 			else
639 				data->states[i] = POWER_STATE_TYPE_DEFAULT;
640 		}
641 	}
642 	return 0;
643 }
644 
645 static int pp_dpm_get_pp_table(void *handle, char **table)
646 {
647 	struct pp_hwmgr *hwmgr = handle;
648 
649 	if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
650 		return -EINVAL;
651 
652 	*table = (char *)hwmgr->soft_pp_table;
653 	return hwmgr->soft_pp_table_size;
654 }
655 
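/*
 * Perform a full powerplay reinitialization (hw fini followed by hw
 * init) and replay the COMPLETE_INIT task; used after a new pp table
 * has been committed via pp_dpm_set_pp_table().
 */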
656 static int amd_powerplay_reset(void *handle)
657 {
658 	struct pp_hwmgr *hwmgr = handle;
659 	int ret;
660 
661 	ret = hwmgr_hw_fini(hwmgr);
662 	if (ret)
663 		return ret;
664 
665 	ret = hwmgr_hw_init(hwmgr);
666 	if (ret)
667 		return ret;
668 
669 	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
670 }
671 
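/*
 * Commit a caller-supplied pp table: on first use, duplicate the soft
 * pp table into hardcode_pp_table, overwrite it with the new contents,
 * repoint soft_pp_table at it, and then reset powerplay so the new
 * table takes effect.
 */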
672 static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
673 {
674 	struct pp_hwmgr *hwmgr = handle;
675 	int ret = -ENOMEM;
676 
677 	if (!hwmgr || !hwmgr->pm_en)
678 		return -EINVAL;
679 
680 	if (!hwmgr->hardcode_pp_table) {
681 		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
682 						   hwmgr->soft_pp_table_size,
683 						   GFP_KERNEL);
684 		if (!hwmgr->hardcode_pp_table)
685 			return ret;
686 	}
687 
688 	memcpy(hwmgr->hardcode_pp_table, buf, size);
689 
690 	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
691 
692 	ret = amd_powerplay_reset(handle);
693 	if (ret)
694 		return ret;
695 
696 	if (hwmgr->hwmgr_func->avfs_control)
697 		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
698 
699 	return ret;
700 }
701 
702 static int pp_dpm_force_clock_level(void *handle,
703 		enum pp_clock_type type, uint32_t mask)
704 {
705 	struct pp_hwmgr *hwmgr = handle;
706 
707 	if (!hwmgr || !hwmgr->pm_en)
708 		return -EINVAL;
709 
710 	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
711 		pr_info_ratelimited("%s was not implemented.\n", __func__);
712 		return 0;
713 	}
714 
715 	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
716 		pr_debug("force clock level is for dpm manual mode only.\n");
717 		return -EINVAL;
718 	}
719 
720 	return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
721 }
722 
723 static int pp_dpm_emit_clock_levels(void *handle,
724 				    enum pp_clock_type type,
725 				    char *buf,
726 				    int *offset)
727 {
728 	struct pp_hwmgr *hwmgr = handle;
729 
730 	if (!hwmgr || !hwmgr->pm_en)
731 		return -EOPNOTSUPP;
732 
733 	if (!hwmgr->hwmgr_func->emit_clock_levels)
734 		return -ENOENT;
735 
736 	return hwmgr->hwmgr_func->emit_clock_levels(hwmgr, type, buf, offset);
737 }
738 
739 static int pp_dpm_print_clock_levels(void *handle,
740 		enum pp_clock_type type, char *buf)
741 {
742 	struct pp_hwmgr *hwmgr = handle;
743 
744 	if (!hwmgr || !hwmgr->pm_en)
745 		return -EINVAL;
746 
747 	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
748 		pr_info_ratelimited("%s was not implemented.\n", __func__);
749 		return 0;
750 	}
751 	return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
752 }
753 
754 static int pp_dpm_get_sclk_od(void *handle)
755 {
756 	struct pp_hwmgr *hwmgr = handle;
757 
758 	if (!hwmgr || !hwmgr->pm_en)
759 		return -EINVAL;
760 
761 	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
762 		pr_info_ratelimited("%s was not implemented.\n", __func__);
763 		return 0;
764 	}
765 	return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
766 }
767 
768 static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
769 {
770 	struct pp_hwmgr *hwmgr = handle;
771 
772 	if (!hwmgr || !hwmgr->pm_en)
773 		return -EINVAL;
774 
775 	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
776 		pr_info_ratelimited("%s was not implemented.\n", __func__);
777 		return 0;
778 	}
779 
780 	return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
781 }
782 
783 static int pp_dpm_get_mclk_od(void *handle)
784 {
785 	struct pp_hwmgr *hwmgr = handle;
786 
787 	if (!hwmgr || !hwmgr->pm_en)
788 		return -EINVAL;
789 
790 	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
791 		pr_info_ratelimited("%s was not implemented.\n", __func__);
792 		return 0;
793 	}
794 	return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
795 }
796 
797 static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
798 {
799 	struct pp_hwmgr *hwmgr = handle;
800 
801 	if (!hwmgr || !hwmgr->pm_en)
802 		return -EINVAL;
803 
804 	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
805 		pr_info_ratelimited("%s was not implemented.\n", __func__);
806 		return 0;
807 	}
808 	return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
809 }
810 
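/*
 * Serve the sensor queries that only need data cached in the hwmgr
 * (stable/peak pstate clocks, fan RPM limits) directly; every other
 * sensor id is forwarded to the ASIC-specific read_sensor callback.
 */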
811 static int pp_dpm_read_sensor(void *handle, int idx,
812 			      void *value, int *size)
813 {
814 	struct pp_hwmgr *hwmgr = handle;
815 
816 	if (!hwmgr || !hwmgr->pm_en || !value)
817 		return -EINVAL;
818 
819 	switch (idx) {
820 	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
821 		*((uint32_t *)value) = hwmgr->pstate_sclk * 100;
822 		return 0;
823 	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
824 		*((uint32_t *)value) = hwmgr->pstate_mclk * 100;
825 		return 0;
826 	case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
827 		*((uint32_t *)value) = hwmgr->pstate_sclk_peak * 100;
828 		return 0;
829 	case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
830 		*((uint32_t *)value) = hwmgr->pstate_mclk_peak * 100;
831 		return 0;
832 	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
833 		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
834 		return 0;
835 	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
836 		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
837 		return 0;
838 	default:
839 		return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
840 	}
841 }
842 
 843 static struct amd_vce_state *
 844 pp_dpm_get_vce_clock_state(void *handle, unsigned int idx)
845 {
846 	struct pp_hwmgr *hwmgr = handle;
847 
848 	if (!hwmgr || !hwmgr->pm_en)
849 		return NULL;
850 
851 	if (idx < hwmgr->num_vce_state_tables)
852 		return &hwmgr->vce_states[idx];
853 	return NULL;
854 }
855 
856 static int pp_get_power_profile_mode(void *handle, char *buf)
857 {
858 	struct pp_hwmgr *hwmgr = handle;
859 
860 	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
861 		return -EOPNOTSUPP;
862 	if (!buf)
863 		return -EINVAL;
864 
865 	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
866 }
867 
868 static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
869 {
870 	struct pp_hwmgr *hwmgr = handle;
871 
872 	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode)
873 		return -EOPNOTSUPP;
874 
875 	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
876 		pr_debug("power profile setting is for manual dpm mode only.\n");
877 		return -EINVAL;
878 	}
879 
880 	return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
881 }
882 
883 static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
884 {
885 	struct pp_hwmgr *hwmgr = handle;
886 
887 	if (!hwmgr || !hwmgr->pm_en)
888 		return -EINVAL;
889 
890 	if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
891 		return 0;
892 
893 	return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
894 }
895 
896 static int pp_odn_edit_dpm_table(void *handle, enum PP_OD_DPM_TABLE_COMMAND type,
897 				 long *input, uint32_t size)
898 {
899 	struct pp_hwmgr *hwmgr = handle;
900 
901 	if (!hwmgr || !hwmgr->pm_en)
902 		return -EINVAL;
903 
904 	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
905 		pr_info_ratelimited("%s was not implemented.\n", __func__);
906 		return 0;
907 	}
908 
909 	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
910 }
911 
912 static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
913 {
914 	struct pp_hwmgr *hwmgr = handle;
915 
916 	if (!hwmgr)
917 		return -EINVAL;
918 
919 	if (!hwmgr->pm_en)
920 		return 0;
921 
922 	if (hwmgr->hwmgr_func->set_mp1_state)
923 		return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
924 
925 	return 0;
926 }
927 
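/*
 * Each power profile occupies one bit in workload_mask, indexed by its
 * priority; fls() then selects the highest-priority profile that is
 * still enabled. As an illustration (the priorities here are made up):
 * with profiles of priority 1 and 3 enabled, workload_mask = 0b1010,
 * fls() returns 4, and workload_setting[3] is applied. The chosen
 * profile is only committed when the dpm level is not MANUAL.
 */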
928 static int pp_dpm_switch_power_profile(void *handle,
929 		enum PP_SMC_POWER_PROFILE type, bool en)
930 {
931 	struct pp_hwmgr *hwmgr = handle;
932 	long workload[1];
933 	uint32_t index;
934 
935 	if (!hwmgr || !hwmgr->pm_en)
936 		return -EINVAL;
937 
938 	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
939 		pr_info_ratelimited("%s was not implemented.\n", __func__);
940 		return -EINVAL;
941 	}
942 
 943 	if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
944 		return -EINVAL;
945 
946 	if (!en) {
947 		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
948 		index = fls(hwmgr->workload_mask);
949 		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
950 		workload[0] = hwmgr->workload_setting[index];
951 	} else {
952 		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
953 		index = fls(hwmgr->workload_mask);
954 		index = index <= Workload_Policy_Max ? index - 1 : 0;
955 		workload[0] = hwmgr->workload_setting[index];
956 	}
957 
958 	if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
959 		hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
960 			if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en))
961 				return -EINVAL;
962 	}
963 
964 	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
965 		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);
966 
967 	return 0;
968 }
969 
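/*
 * A limit of 0 restores the default power limit. With overdrive
 * enabled, the cap is the default limit scaled by TDPODLimit percent;
 * e.g. a default of 200 with TDPODLimit = 20 allows up to 240.
 */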
970 static int pp_set_power_limit(void *handle, uint32_t limit)
971 {
972 	struct pp_hwmgr *hwmgr = handle;
973 	uint32_t max_power_limit;
974 
975 	if (!hwmgr || !hwmgr->pm_en)
976 		return -EINVAL;
977 
978 	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
979 		pr_info_ratelimited("%s was not implemented.\n", __func__);
980 		return -EINVAL;
981 	}
982 
983 	if (limit == 0)
984 		limit = hwmgr->default_power_limit;
985 
986 	max_power_limit = hwmgr->default_power_limit;
987 	if (hwmgr->od_enabled) {
988 		max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
989 		max_power_limit /= 100;
990 	}
991 
992 	if (limit > max_power_limit)
993 		return -EINVAL;
994 
995 	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
996 	hwmgr->power_limit = limit;
997 	return 0;
998 }
999 
1000 static int pp_get_power_limit(void *handle, uint32_t *limit,
1001 			      enum pp_power_limit_level pp_limit_level,
1002 			      enum pp_power_type power_type)
1003 {
1004 	struct pp_hwmgr *hwmgr = handle;
1005 	int ret = 0;
1006 
1007 	if (!hwmgr || !hwmgr->pm_en || !limit)
1008 		return -EINVAL;
1009 
1010 	if (power_type != PP_PWR_TYPE_SUSTAINED)
1011 		return -EOPNOTSUPP;
1012 
 1013 	switch (pp_limit_level) {
 1014 	case PP_PWR_LIMIT_CURRENT:
 1015 		*limit = hwmgr->power_limit;
 1016 		break;
 1017 	case PP_PWR_LIMIT_DEFAULT:
 1018 		*limit = hwmgr->default_power_limit;
 1019 		break;
 1020 	case PP_PWR_LIMIT_MAX:
 1021 		*limit = hwmgr->default_power_limit;
 1022 		if (hwmgr->od_enabled) {
 1023 			*limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
 1024 			*limit /= 100;
 1025 		}
 1026 		break;
 1027 	case PP_PWR_LIMIT_MIN:
 1028 		*limit = 0;
 1029 		break;
 1030 	default:
 1031 		ret = -EOPNOTSUPP;
 1032 		break;
 1033 	}
1034 
1035 	return ret;
1036 }
1037 
1038 static int pp_display_configuration_change(void *handle,
1039 	const struct amd_pp_display_configuration *display_config)
1040 {
1041 	struct pp_hwmgr *hwmgr = handle;
1042 
1043 	if (!hwmgr || !hwmgr->pm_en)
1044 		return -EINVAL;
1045 
1046 	phm_store_dal_configuration_data(hwmgr, display_config);
1047 	return 0;
1048 }
1049 
1050 static int pp_get_display_power_level(void *handle,
1051 		struct amd_pp_simple_clock_info *output)
1052 {
1053 	struct pp_hwmgr *hwmgr = handle;
1054 
1055 	if (!hwmgr || !hwmgr->pm_en || !output)
1056 		return -EINVAL;
1057 
1058 	return phm_get_dal_power_level(hwmgr, output);
1059 }
1060 
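/*
 * Fill an amd_pp_clock_info block for the current power state, using
 * the power-containment performance level when that cap is enabled and
 * the activity-based level otherwise.
 */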
1061 static int pp_get_current_clocks(void *handle,
1062 		struct amd_pp_clock_info *clocks)
1063 {
1064 	struct amd_pp_simple_clock_info simple_clocks = { 0 };
1065 	struct pp_clock_info hw_clocks;
1066 	struct pp_hwmgr *hwmgr = handle;
1067 	int ret = 0;
1068 
1069 	if (!hwmgr || !hwmgr->pm_en)
1070 		return -EINVAL;
1071 
1072 	phm_get_dal_power_level(hwmgr, &simple_clocks);
1073 
1074 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1075 					PHM_PlatformCaps_PowerContainment))
1076 		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1077 					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
1078 	else
1079 		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1080 					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);
1081 
1082 	if (ret) {
 1083 		pr_debug("Error in phm_get_clock_info\n");
1084 		return -EINVAL;
1085 	}
1086 
1087 	clocks->min_engine_clock = hw_clocks.min_eng_clk;
1088 	clocks->max_engine_clock = hw_clocks.max_eng_clk;
1089 	clocks->min_memory_clock = hw_clocks.min_mem_clk;
1090 	clocks->max_memory_clock = hw_clocks.max_mem_clk;
1091 	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1092 	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1093 
1094 	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1095 	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1096 
1097 	if (simple_clocks.level == 0)
1098 		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1099 	else
1100 		clocks->max_clocks_state = simple_clocks.level;
 1101 	if (phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks) == 0) {
1102 	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
1103 		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1104 		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1105 	}
1106 	return 0;
1107 }
1108 
1109 static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1110 {
1111 	struct pp_hwmgr *hwmgr = handle;
1112 
1113 	if (!hwmgr || !hwmgr->pm_en)
1114 		return -EINVAL;
1115 
1116 	if (clocks == NULL)
1117 		return -EINVAL;
1118 
1119 	return phm_get_clock_by_type(hwmgr, type, clocks);
1120 }
1121 
1122 static int pp_get_clock_by_type_with_latency(void *handle,
1123 		enum amd_pp_clock_type type,
1124 		struct pp_clock_levels_with_latency *clocks)
1125 {
1126 	struct pp_hwmgr *hwmgr = handle;
1127 
1128 	if (!hwmgr || !hwmgr->pm_en || !clocks)
1129 		return -EINVAL;
1130 
1131 	return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1132 }
1133 
1134 static int pp_get_clock_by_type_with_voltage(void *handle,
1135 		enum amd_pp_clock_type type,
1136 		struct pp_clock_levels_with_voltage *clocks)
1137 {
1138 	struct pp_hwmgr *hwmgr = handle;
1139 
1140 	if (!hwmgr || !hwmgr->pm_en || !clocks)
1141 		return -EINVAL;
1142 
1143 	return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1144 }
1145 
1146 static int pp_set_watermarks_for_clocks_ranges(void *handle,
1147 		void *clock_ranges)
1148 {
1149 	struct pp_hwmgr *hwmgr = handle;
1150 
1151 	if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
1152 		return -EINVAL;
1153 
1154 	return phm_set_watermarks_for_clocks_ranges(hwmgr,
1155 						    clock_ranges);
1156 }
1157 
1158 static int pp_display_clock_voltage_request(void *handle,
1159 		struct pp_display_clock_request *clock)
1160 {
1161 	struct pp_hwmgr *hwmgr = handle;
1162 
1163 	if (!hwmgr || !hwmgr->pm_en || !clock)
1164 		return -EINVAL;
1165 
1166 	return phm_display_clock_voltage_request(hwmgr, clock);
1167 }
1168 
1169 static int pp_get_display_mode_validation_clocks(void *handle,
1170 		struct amd_pp_simple_clock_info *clocks)
1171 {
1172 	struct pp_hwmgr *hwmgr = handle;
1173 	int ret = 0;
1174 
1175 	if (!hwmgr || !hwmgr->pm_en || !clocks)
1176 		return -EINVAL;
1177 
1178 	clocks->level = PP_DAL_POWERLEVEL_7;
1179 
1180 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1181 		ret = phm_get_max_high_clocks(hwmgr, clocks);
1182 
1183 	return ret;
1184 }
1185 
1186 static int pp_dpm_powergate_mmhub(void *handle)
1187 {
1188 	struct pp_hwmgr *hwmgr = handle;
1189 
1190 	if (!hwmgr || !hwmgr->pm_en)
1191 		return -EINVAL;
1192 
1193 	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
1194 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1195 		return 0;
1196 	}
1197 
1198 	return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
1199 }
1200 
1201 static int pp_dpm_powergate_gfx(void *handle, bool gate)
1202 {
1203 	struct pp_hwmgr *hwmgr = handle;
1204 
1205 	if (!hwmgr || !hwmgr->pm_en)
1206 		return 0;
1207 
1208 	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
1209 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1210 		return 0;
1211 	}
1212 
1213 	return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
1214 }
1215 
1216 static void pp_dpm_powergate_acp(void *handle, bool gate)
1217 {
1218 	struct pp_hwmgr *hwmgr = handle;
1219 
1220 	if (!hwmgr || !hwmgr->pm_en)
1221 		return;
1222 
1223 	if (hwmgr->hwmgr_func->powergate_acp == NULL) {
1224 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1225 		return;
1226 	}
1227 
1228 	hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
1229 }
1230 
1231 static void pp_dpm_powergate_sdma(void *handle, bool gate)
1232 {
1233 	struct pp_hwmgr *hwmgr = handle;
1234 
1235 	if (!hwmgr)
1236 		return;
1237 
1238 	if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
1239 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1240 		return;
1241 	}
1242 
1243 	hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
1244 }
1245 
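/*
 * Route per-IP powergating requests from the amdgpu core to the
 * matching hwmgr callback; unhandled block types are silently ignored.
 */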
1246 static int pp_set_powergating_by_smu(void *handle,
1247 				uint32_t block_type, bool gate)
1248 {
1249 	int ret = 0;
1250 
1251 	switch (block_type) {
1252 	case AMD_IP_BLOCK_TYPE_UVD:
1253 	case AMD_IP_BLOCK_TYPE_VCN:
1254 		pp_dpm_powergate_uvd(handle, gate);
1255 		break;
1256 	case AMD_IP_BLOCK_TYPE_VCE:
1257 		pp_dpm_powergate_vce(handle, gate);
1258 		break;
1259 	case AMD_IP_BLOCK_TYPE_GMC:
 1260 		/*
 1261 		 * For now, this is only used on PICASSO, and only the
 1262 		 * "gate" operation is supported.
 1263 		 */
1264 		if (gate)
1265 			pp_dpm_powergate_mmhub(handle);
1266 		break;
1267 	case AMD_IP_BLOCK_TYPE_GFX:
1268 		ret = pp_dpm_powergate_gfx(handle, gate);
1269 		break;
1270 	case AMD_IP_BLOCK_TYPE_ACP:
1271 		pp_dpm_powergate_acp(handle, gate);
1272 		break;
1273 	case AMD_IP_BLOCK_TYPE_SDMA:
1274 		pp_dpm_powergate_sdma(handle, gate);
1275 		break;
1276 	default:
1277 		break;
1278 	}
1279 	return ret;
1280 }
1281 
1282 static int pp_notify_smu_enable_pwe(void *handle)
1283 {
1284 	struct pp_hwmgr *hwmgr = handle;
1285 
1286 	if (!hwmgr || !hwmgr->pm_en)
1287 		return -EINVAL;
1288 
1289 	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
1290 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1291 		return -EINVAL;
1292 	}
1293 
1294 	hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
1295 
1296 	return 0;
1297 }
1298 
1299 static int pp_enable_mgpu_fan_boost(void *handle)
1300 {
1301 	struct pp_hwmgr *hwmgr = handle;
1302 
1303 	if (!hwmgr)
1304 		return -EINVAL;
1305 
1306 	if (!hwmgr->pm_en ||
1307 	     hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
1308 		return 0;
1309 
1310 	hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
1311 
1312 	return 0;
1313 }
1314 
1315 static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
1316 {
1317 	struct pp_hwmgr *hwmgr = handle;
1318 
1319 	if (!hwmgr || !hwmgr->pm_en)
1320 		return -EINVAL;
1321 
1322 	if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
1323 		pr_debug("%s was not implemented.\n", __func__);
1324 		return -EINVAL;
1325 	}
1326 
1327 	hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
1328 
1329 	return 0;
1330 }
1331 
1332 static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
1333 {
1334 	struct pp_hwmgr *hwmgr = handle;
1335 
1336 	if (!hwmgr || !hwmgr->pm_en)
1337 		return -EINVAL;
1338 
1339 	if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
1340 		pr_debug("%s was not implemented.\n", __func__);
1341 		return -EINVAL;
1342 	}
1343 
1344 	hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
1345 
1346 	return 0;
1347 }
1348 
1349 static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
1350 {
1351 	struct pp_hwmgr *hwmgr = handle;
1352 
1353 	if (!hwmgr || !hwmgr->pm_en)
1354 		return -EINVAL;
1355 
1356 	if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
1357 		pr_debug("%s was not implemented.\n", __func__);
1358 		return -EINVAL;
1359 	}
1360 
1361 	hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
1362 
1363 	return 0;
1364 }
1365 
1366 static int pp_set_active_display_count(void *handle, uint32_t count)
1367 {
1368 	struct pp_hwmgr *hwmgr = handle;
1369 
1370 	if (!hwmgr || !hwmgr->pm_en)
1371 		return -EINVAL;
1372 
1373 	return phm_set_active_display_count(hwmgr, count);
1374 }
1375 
1376 static int pp_get_asic_baco_capability(void *handle)
1377 {
1378 	struct pp_hwmgr *hwmgr = handle;
1379 
1380 	if (!hwmgr)
1381 		return false;
1382 
1383 	if (!(hwmgr->not_vf && amdgpu_dpm) ||
1384 		!hwmgr->hwmgr_func->get_bamaco_support)
1385 		return false;
1386 
1387 	return hwmgr->hwmgr_func->get_bamaco_support(hwmgr);
1388 }
1389 
1390 static int pp_get_asic_baco_state(void *handle, int *state)
1391 {
1392 	struct pp_hwmgr *hwmgr = handle;
1393 
1394 	if (!hwmgr)
1395 		return -EINVAL;
1396 
1397 	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
1398 		return 0;
1399 
1400 	hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
1401 
1402 	return 0;
1403 }
1404 
1405 static int pp_set_asic_baco_state(void *handle, int state)
1406 {
1407 	struct pp_hwmgr *hwmgr = handle;
1408 
1409 	if (!hwmgr)
1410 		return -EINVAL;
1411 
1412 	if (!(hwmgr->not_vf && amdgpu_dpm) ||
1413 		!hwmgr->hwmgr_func->set_asic_baco_state)
1414 		return 0;
1415 
1416 	hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
1417 
1418 	return 0;
1419 }
1420 
1421 static int pp_get_ppfeature_status(void *handle, char *buf)
1422 {
1423 	struct pp_hwmgr *hwmgr = handle;
1424 
1425 	if (!hwmgr || !hwmgr->pm_en || !buf)
1426 		return -EINVAL;
1427 
1428 	if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
1429 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1430 		return -EINVAL;
1431 	}
1432 
1433 	return hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
1434 }
1435 
1436 static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
1437 {
1438 	struct pp_hwmgr *hwmgr = handle;
1439 
1440 	if (!hwmgr || !hwmgr->pm_en)
1441 		return -EINVAL;
1442 
1443 	if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
1444 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1445 		return -EINVAL;
1446 	}
1447 
1448 	return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
1449 }
1450 
1451 static int pp_asic_reset_mode_2(void *handle)
1452 {
1453 	struct pp_hwmgr *hwmgr = handle;
1454 
1455 	if (!hwmgr || !hwmgr->pm_en)
1456 		return -EINVAL;
1457 
1458 	if (hwmgr->hwmgr_func->asic_reset == NULL) {
1459 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1460 		return -EINVAL;
1461 	}
1462 
1463 	return hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
1464 }
1465 
1466 static int pp_smu_i2c_bus_access(void *handle, bool acquire)
1467 {
1468 	struct pp_hwmgr *hwmgr = handle;
1469 
1470 	if (!hwmgr || !hwmgr->pm_en)
1471 		return -EINVAL;
1472 
1473 	if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
1474 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1475 		return -EINVAL;
1476 	}
1477 
1478 	return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
1479 }
1480 
1481 static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
1482 {
1483 	struct pp_hwmgr *hwmgr = handle;
1484 
1485 	if (!hwmgr)
1486 		return -EINVAL;
1487 
1488 	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
1489 		return 0;
1490 
1491 	hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
1492 
1493 	return 0;
1494 }
1495 
1496 static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
1497 {
1498 	struct pp_hwmgr *hwmgr = handle;
1499 
1500 	if (!hwmgr)
1501 		return -EINVAL;
1502 
1503 	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
1504 		return 0;
1505 
1506 	hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
1507 
1508 	return 0;
1509 }
1510 
1511 static ssize_t pp_get_gpu_metrics(void *handle, void **table)
1512 {
1513 	struct pp_hwmgr *hwmgr = handle;
1514 
1515 	if (!hwmgr)
1516 		return -EINVAL;
1517 
1518 	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
1519 		return -EOPNOTSUPP;
1520 
1521 	return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
1522 }
1523 
1524 static int pp_gfx_state_change_set(void *handle, uint32_t state)
1525 {
1526 	struct pp_hwmgr *hwmgr = handle;
1527 
1528 	if (!hwmgr || !hwmgr->pm_en)
1529 		return -EINVAL;
1530 
1531 	if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
1532 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1533 		return -EINVAL;
1534 	}
1535 
1536 	hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);
1537 	return 0;
1538 }
1539 
1540 static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
1541 {
1542 	struct pp_hwmgr *hwmgr = handle;
1543 	struct amdgpu_device *adev = hwmgr->adev;
1544 	int err;
1545 
1546 	if (!addr || !size)
1547 		return -EINVAL;
1548 
1549 	*addr = NULL;
1550 	*size = 0;
1551 	if (adev->pm.smu_prv_buffer) {
1552 		err = amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
1553 		if (err)
1554 			return err;
1555 		*size = adev->pm.smu_prv_buffer_size;
1556 	}
1557 
1558 	return 0;
1559 }
1560 
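/*
 * On the legacy (non-DC) display path, collect the active CRTC
 * configuration and push it to powerplay before dispatching the
 * DISPLAY_CONFIG_CHANGE task, so the power state can be re-evaluated.
 */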
1561 static void pp_pm_compute_clocks(void *handle)
1562 {
1563 	struct pp_hwmgr *hwmgr = handle;
1564 	struct amdgpu_device *adev = hwmgr->adev;
1565 
1566 	if (!adev->dc_enabled) {
1567 		amdgpu_dpm_get_active_displays(adev);
1568 		adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
1569 		adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
1570 		adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
 1571 		/* We have issues with mclk switching at
 1572 		 * refresh rates over 120 Hz on the non-DC code path.
 1573 		 */
1574 		if (adev->pm.pm_display_cfg.vrefresh > 120)
1575 			adev->pm.pm_display_cfg.min_vblank_time = 0;
1576 
1577 		pp_display_configuration_change(handle,
1578 						&adev->pm.pm_display_cfg);
1579 	}
1580 
1581 	pp_dpm_dispatch_tasks(handle,
1582 			      AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
1583 			      NULL);
1584 }
1585 
1586 static const struct amd_pm_funcs pp_dpm_funcs = {
1587 	.load_firmware = pp_dpm_load_fw,
1588 	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
1589 	.force_performance_level = pp_dpm_force_performance_level,
1590 	.get_performance_level = pp_dpm_get_performance_level,
1591 	.get_current_power_state = pp_dpm_get_current_power_state,
1592 	.dispatch_tasks = pp_dpm_dispatch_tasks,
1593 	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
1594 	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
1595 	.set_fan_speed_pwm = pp_dpm_set_fan_speed_pwm,
1596 	.get_fan_speed_pwm = pp_dpm_get_fan_speed_pwm,
1597 	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
1598 	.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
1599 	.get_pp_num_states = pp_dpm_get_pp_num_states,
1600 	.get_pp_table = pp_dpm_get_pp_table,
1601 	.set_pp_table = pp_dpm_set_pp_table,
1602 	.force_clock_level = pp_dpm_force_clock_level,
1603 	.emit_clock_levels = pp_dpm_emit_clock_levels,
1604 	.print_clock_levels = pp_dpm_print_clock_levels,
1605 	.get_sclk_od = pp_dpm_get_sclk_od,
1606 	.set_sclk_od = pp_dpm_set_sclk_od,
1607 	.get_mclk_od = pp_dpm_get_mclk_od,
1608 	.set_mclk_od = pp_dpm_set_mclk_od,
1609 	.read_sensor = pp_dpm_read_sensor,
1610 	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
1611 	.switch_power_profile = pp_dpm_switch_power_profile,
1612 	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
1613 	.set_powergating_by_smu = pp_set_powergating_by_smu,
1614 	.get_power_profile_mode = pp_get_power_profile_mode,
1615 	.set_power_profile_mode = pp_set_power_profile_mode,
1616 	.set_fine_grain_clk_vol = pp_set_fine_grain_clk_vol,
1617 	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
1618 	.set_mp1_state = pp_dpm_set_mp1_state,
1619 	.set_power_limit = pp_set_power_limit,
1620 	.get_power_limit = pp_get_power_limit,
1621 /* export to DC */
1622 	.get_sclk = pp_dpm_get_sclk,
1623 	.get_mclk = pp_dpm_get_mclk,
1624 	.display_configuration_change = pp_display_configuration_change,
1625 	.get_display_power_level = pp_get_display_power_level,
1626 	.get_current_clocks = pp_get_current_clocks,
1627 	.get_clock_by_type = pp_get_clock_by_type,
1628 	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
1629 	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
1630 	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
1631 	.display_clock_voltage_request = pp_display_clock_voltage_request,
1632 	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
1633 	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
1634 	.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
1635 	.set_active_display_count = pp_set_active_display_count,
1636 	.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
1637 	.set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
1638 	.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
1639 	.get_asic_baco_capability = pp_get_asic_baco_capability,
1640 	.get_asic_baco_state = pp_get_asic_baco_state,
1641 	.set_asic_baco_state = pp_set_asic_baco_state,
1642 	.get_ppfeature_status = pp_get_ppfeature_status,
1643 	.set_ppfeature_status = pp_set_ppfeature_status,
1644 	.asic_reset_mode_2 = pp_asic_reset_mode_2,
1645 	.smu_i2c_bus_access = pp_smu_i2c_bus_access,
1646 	.set_df_cstate = pp_set_df_cstate,
1647 	.set_xgmi_pstate = pp_set_xgmi_pstate,
1648 	.get_gpu_metrics = pp_get_gpu_metrics,
1649 	.gfx_state_change_set = pp_gfx_state_change_set,
1650 	.get_smu_prv_buf_details = pp_get_prv_buffer_details,
1651 	.pm_compute_clocks = pp_pm_compute_clocks,
1652 };
1653