/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/errno.h>
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "power_state.h"

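/*
 * Fallback thermal range (in millidegrees Celsius) used to seed the
 * PP_TemperatureRange in phm_start_thermal_controller() when the back-end
 * does not report its own limits via get_thermal_temperature_range().
 */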
#define TEMP_RANGE_MIN (0)
#define TEMP_RANGE_MAX (80 * 1000)

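/* Bail out with -EINVAL when the hwmgr or its hwmgr_func callback table is missing. */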
#define PHM_FUNC_CHECK(hw) \
	do {							\
		if ((hw) == NULL || (hw)->hwmgr_func == NULL)	\
			return -EINVAL;				\
	} while (0)

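/*
 * The phm_* helpers below are thin dispatchers: they validate the hwmgr with
 * PHM_FUNC_CHECK() and then forward to the corresponding optional hwmgr_func
 * back-end callback. Unless noted otherwise, a missing callback is treated as
 * a no-op and the helper returns 0.
 */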
int phm_setup_asic(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (NULL != hwmgr->hwmgr_func->asic_setup)
		return hwmgr->hwmgr_func->asic_setup(hwmgr);

	return 0;
}

int phm_power_down_asic(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (NULL != hwmgr->hwmgr_func->power_off_asic)
		return hwmgr->hwmgr_func->power_off_asic(hwmgr);

	return 0;
}

int phm_set_power_state(struct pp_hwmgr *hwmgr,
		    const struct pp_hw_power_state *pcurrent_state,
		    const struct pp_hw_power_state *pnew_power_state)
{
	struct phm_set_power_state_input states;

	PHM_FUNC_CHECK(hwmgr);

	states.pcurrent_state = pcurrent_state;
	states.pnew_state = pnew_power_state;

	if (NULL != hwmgr->hwmgr_func->power_state_set)
		return hwmgr->hwmgr_func->power_state_set(hwmgr, &states);

	return 0;
}

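/**
 * phm_enable_dynamic_state_management - Enable DPM on the ASIC.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * The enable is skipped when DPM is already running while the device is in
 * its suspend/resume path on bare metal (not passthrough, not one-VF), so
 * that resume does not re-enable it.
 *
 * Return: 0 when the enable is skipped, -EINVAL when the hwmgr is invalid or
 * no back-end callback is installed, otherwise the callback's result.
 */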
int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = NULL;
	int ret = -EINVAL;

	PHM_FUNC_CHECK(hwmgr);
	adev = hwmgr->adev;

	/* Skip for suspend/resume case */
	if (!hwmgr->pp_one_vf && smum_is_dpm_running(hwmgr)
	    && !amdgpu_passthrough(adev) && adev->in_suspend) {
		pr_info("dpm has been enabled\n");
		return 0;
	}

	if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
		ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);

	return ret;
}

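/**
 * phm_disable_dynamic_state_management - Disable DPM on the ASIC.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * Return: 0 for virtual functions or when DPM is already disabled, -EINVAL
 * when no back-end callback exists, otherwise the callback's result.
 */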
int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
{
	int ret = -EINVAL;

	PHM_FUNC_CHECK(hwmgr);

	if (!hwmgr->not_vf)
		return 0;

	if (!smum_is_dpm_running(hwmgr)) {
		pr_info("dpm has been disabled\n");
		return 0;
	}

	if (hwmgr->hwmgr_func->dynamic_state_management_disable)
		ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);

	return ret;
}

int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
{
	int ret = 0;

	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->force_dpm_level != NULL)
		ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);

	return ret;
}

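/**
 * phm_apply_state_adjust_rules - Apply back-end adjustment rules to a power state.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @adjusted_ps: the requested power state to be adjusted.
 * @current_ps: the currently active power state.
 *
 * Return: the back-end's result, or 0 when no callback is installed.
 */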
int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				 struct pp_power_state *adjusted_ps,
				 const struct pp_power_state *current_ps)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->apply_state_adjust_rules != NULL)
		return hwmgr->hwmgr_func->apply_state_adjust_rules(hwmgr,
								   adjusted_ps,
								   current_ps);

	return 0;
}

int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->apply_clocks_adjust_rules != NULL)
		return hwmgr->hwmgr_func->apply_clocks_adjust_rules(hwmgr);

	return 0;
}

int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->powerdown_uvd != NULL)
		return hwmgr->hwmgr_func->powerdown_uvd(hwmgr);

	return 0;
}

int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (NULL != hwmgr->hwmgr_func->disable_clock_power_gating)
		return hwmgr->hwmgr_func->disable_clock_power_gating(hwmgr);

	return 0;
}

int phm_pre_display_configuration_changed(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (NULL != hwmgr->hwmgr_func->pre_display_config_changed)
		hwmgr->hwmgr_func->pre_display_config_changed(hwmgr);

	return 0;
}

int phm_display_configuration_changed(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (NULL != hwmgr->hwmgr_func->display_config_changed)
		hwmgr->hwmgr_func->display_config_changed(hwmgr);

	return 0;
}

int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (NULL != hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment)
		hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment(hwmgr);

	return 0;
}

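/**
 * phm_stop_thermal_controller - Stop the thermal controller subsystem.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * Return: 0 for virtual functions, -EINVAL when no back-end callback exists,
 * otherwise the callback's result.
 */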
int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (!hwmgr->not_vf)
		return 0;

	if (hwmgr->hwmgr_func->stop_thermal_controller == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->stop_thermal_controller(hwmgr);
}

int phm_register_irq_handlers(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->register_irq_handlers != NULL)
		return hwmgr->hwmgr_func->register_irq_handlers(hwmgr);

	return 0;
}

/**
 * phm_start_thermal_controller - Initialize the thermal controller subsystem.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * Return: 0 for virtual functions or when no thermal controller is started,
 * otherwise the value returned by the back-end's start_thermal_controller().
 */
int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
{
	int ret = 0;
	/*
	 * Positional initializer for struct PP_TemperatureRange: min, max,
	 * edge_emergency_max, hotspot_min, hotspot_crit_max,
	 * hotspot_emergency_max, mem_min, mem_crit_max, mem_emergency_max,
	 * matching the per-field copies into adev->pm.dpm.thermal below.
	 */
	struct PP_TemperatureRange range = {
		TEMP_RANGE_MIN,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MIN,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MIN,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MAX};
	struct amdgpu_device *adev = hwmgr->adev;

	if (!hwmgr->not_vf)
		return 0;

	if (hwmgr->hwmgr_func->get_thermal_temperature_range)
		hwmgr->hwmgr_func->get_thermal_temperature_range(
				hwmgr, &range);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController)
			&& hwmgr->hwmgr_func->start_thermal_controller != NULL)
		ret = hwmgr->hwmgr_func->start_thermal_controller(hwmgr, &range);

	adev->pm.dpm.thermal.min_temp = range.min;
	adev->pm.dpm.thermal.max_temp = range.max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;

	return ret;
}

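/*
 * Returns true only when the back-end reports that the SMC needs to be
 * reprogrammed for the new display configuration; invalid hwmgr pointers,
 * one-VF setups and missing callbacks never require an update.
 */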
bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
	if (hwmgr == NULL ||
	    hwmgr->hwmgr_func == NULL)
		return false;

	if (hwmgr->pp_one_vf)
		return false;

	if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL)
		return false;

	return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr);
}

int phm_check_states_equal(struct pp_hwmgr *hwmgr,
			   const struct pp_hw_power_state *pstate1,
			   const struct pp_hw_power_state *pstate2,
			   bool *equal)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->check_states_equal == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->check_states_equal(hwmgr, pstate1, pstate2, equal);
}

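/**
 * phm_store_dal_configuration_data - Pass the DAL display configuration to the back-end.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @display_config: the display configuration reported by the display (DAL) code.
 *
 * Forwards the deep-sleep DCEF clock floor, the number of active displays and
 * the CC6 data to the respective back-end callbacks.
 *
 * Return: -EINVAL if @display_config is NULL or the back-end cannot store
 * CC6 data, otherwise 0.
 */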
int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
		    const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int number_of_active_display = 0;

	PHM_FUNC_CHECK(hwmgr);

	if (display_config == NULL)
		return -EINVAL;

	if (NULL != hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
		hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			number_of_active_display++;
	}

	if (NULL != hwmgr->hwmgr_func->set_active_display_count)
		hwmgr->hwmgr_func->set_active_display_count(hwmgr, number_of_active_display);

	if (hwmgr->hwmgr_func->store_cc6_data == NULL)
		return -EINVAL;

	/* TODO: pass other display configuration in the future */

	if (hwmgr->hwmgr_func->store_cc6_data)
		hwmgr->hwmgr_func->store_cc6_data(hwmgr,
				display_config->cpu_pstate_separation_time,
				display_config->cpu_cc6_disable,
				display_config->cpu_pstate_disable,
				display_config->nb_pstate_switch_disable);

	return 0;
}

int phm_get_dal_power_level(struct pp_hwmgr *hwmgr,
		struct amd_pp_simple_clock_info *info)
{
	PHM_FUNC_CHECK(hwmgr);

	if (info == NULL || hwmgr->hwmgr_func->get_dal_power_level == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_dal_power_level(hwmgr, info);
}

int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->set_cpu_power_state != NULL)
		return hwmgr->hwmgr_func->set_cpu_power_state(hwmgr);

	return 0;
}

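/*
 * Query one hardware performance level (clocks, non-local memory parameters)
 * of a power state from the back-end; used by phm_get_clock_info() below to
 * derive the minimum and maximum clocks.
 */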
int phm_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
				PHM_PerformanceLevelDesignation designation, uint32_t index,
				PHM_PerformanceLevel *level)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->get_performance_level == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_performance_level(hwmgr, state, designation, index, level);
}

/**
 * phm_get_clock_info - Retrieve the minimum and maximum clocks of a power state.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @state: the address of the power state structure.
 * @pclock_info: the address of the pp_clock_info structure where the result is returned.
 * @designation: the PHM performance level designation used for the maximum level.
 *
 * Return: -EINVAL if any of the parameters is NULL, otherwise 0 or the error
 * returned while querying the performance levels from the back-end.
 */
int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *pclock_info,
			PHM_PerformanceLevelDesignation designation)
{
	int result;
	PHM_PerformanceLevel performance_level = {0};

	PHM_FUNC_CHECK(hwmgr);

	PP_ASSERT_WITH_CODE((NULL != state), "Invalid Input!", return -EINVAL);
	PP_ASSERT_WITH_CODE((NULL != pclock_info), "Invalid Input!", return -EINVAL);

	/* The lowest performance level (index 0) provides the minimum clocks. */
	result = phm_get_performance_level(hwmgr, state, PHM_PerformanceLevelDesignation_Activity, 0, &performance_level);
	PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve minimum clocks.", return result);

	pclock_info->min_mem_clk = performance_level.memory_clock;
	pclock_info->min_eng_clk = performance_level.coreClock;
	pclock_info->min_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth;

	/* The highest performance level provides the maximum clocks. */
	result = phm_get_performance_level(hwmgr, state, designation,
					(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1), &performance_level);
	PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve maximum clocks.", return result);

	pclock_info->max_mem_clk = performance_level.memory_clock;
	pclock_info->max_eng_clk = performance_level.coreClock;
	pclock_info->max_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth;

	return 0;
}

414 
415 int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
416 {
417 	PHM_FUNC_CHECK(hwmgr);
418 
419 	if (hwmgr->hwmgr_func->get_current_shallow_sleep_clocks == NULL)
420 		return -EINVAL;
421 
422 	return hwmgr->hwmgr_func->get_current_shallow_sleep_clocks(hwmgr, state, clock_info);
423 
424 }
425 
426 int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
427 {
428 	PHM_FUNC_CHECK(hwmgr);
429 
430 	if (hwmgr->hwmgr_func->get_clock_by_type == NULL)
431 		return -EINVAL;
432 
433 	return hwmgr->hwmgr_func->get_clock_by_type(hwmgr, type, clocks);
434 
435 }
436 
437 int phm_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
438 		enum amd_pp_clock_type type,
439 		struct pp_clock_levels_with_latency *clocks)
440 {
441 	PHM_FUNC_CHECK(hwmgr);
442 
443 	if (hwmgr->hwmgr_func->get_clock_by_type_with_latency == NULL)
444 		return -EINVAL;
445 
446 	return hwmgr->hwmgr_func->get_clock_by_type_with_latency(hwmgr, type, clocks);
447 
448 }
449 
450 int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
451 		enum amd_pp_clock_type type,
452 		struct pp_clock_levels_with_voltage *clocks)
453 {
454 	PHM_FUNC_CHECK(hwmgr);
455 
456 	if (hwmgr->hwmgr_func->get_clock_by_type_with_voltage == NULL)
457 		return -EINVAL;
458 
459 	return hwmgr->hwmgr_func->get_clock_by_type_with_voltage(hwmgr, type, clocks);
460 
461 }
462 
463 int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
464 					void *clock_ranges)
465 {
466 	PHM_FUNC_CHECK(hwmgr);
467 
468 	if (!hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges)
469 		return -EINVAL;
470 
471 	return hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr,
472 								clock_ranges);
473 }
474 
475 int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
476 		struct pp_display_clock_request *clock)
477 {
478 	PHM_FUNC_CHECK(hwmgr);
479 
480 	if (!hwmgr->hwmgr_func->display_clock_voltage_request)
481 		return -EINVAL;
482 
483 	return hwmgr->hwmgr_func->display_clock_voltage_request(hwmgr, clock);
484 }
485 
486 int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
487 {
488 	PHM_FUNC_CHECK(hwmgr);
489 
490 	if (hwmgr->hwmgr_func->get_max_high_clocks == NULL)
491 		return -EINVAL;
492 
493 	return hwmgr->hwmgr_func->get_max_high_clocks(hwmgr, clocks);
494 }
495 
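/*
 * Ask the back-end to disable the SMC firmware's CTF (critical thermal)
 * handling; skipped for virtual functions, -EINVAL when no callback exists.
 */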
int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (!hwmgr->not_vf)
		return 0;

	if (hwmgr->hwmgr_func->disable_smc_firmware_ctf == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->disable_smc_firmware_ctf(hwmgr);
}

int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
{
	PHM_FUNC_CHECK(hwmgr);

	if (!hwmgr->hwmgr_func->set_active_display_count)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_active_display_count(hwmgr, count);
}
518