xref: /linux/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c (revision c83b49383b595be50647f0c764a48c78b5f3c4f8)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "pp_debug.h"
24 #include <linux/errno.h>
25 #include "hwmgr.h"
26 #include "hardwaremanager.h"
27 #include "power_state.h"
28 
29 
/* Default thermal range used when the backend provides none: 0..80 C (millidegrees). */
#define TEMP_RANGE_MIN (0)
#define TEMP_RANGE_MAX (80 * 1000)

/* Bail out with -EINVAL when the hwmgr or its backend function table is missing. */
#define PHM_FUNC_CHECK(hw) \
	do {							\
		if ((hw) == NULL || (hw)->hwmgr_func == NULL)	\
			return -EINVAL;				\
	} while (0)
38 
39 int phm_setup_asic(struct pp_hwmgr *hwmgr)
40 {
41 	PHM_FUNC_CHECK(hwmgr);
42 
43 	if (NULL != hwmgr->hwmgr_func->asic_setup)
44 		return hwmgr->hwmgr_func->asic_setup(hwmgr);
45 
46 	return 0;
47 }
48 
49 int phm_power_down_asic(struct pp_hwmgr *hwmgr)
50 {
51 	PHM_FUNC_CHECK(hwmgr);
52 
53 	if (NULL != hwmgr->hwmgr_func->power_off_asic)
54 		return hwmgr->hwmgr_func->power_off_asic(hwmgr);
55 
56 	return 0;
57 }
58 
59 int phm_set_power_state(struct pp_hwmgr *hwmgr,
60 		    const struct pp_hw_power_state *pcurrent_state,
61 		    const struct pp_hw_power_state *pnew_power_state)
62 {
63 	struct phm_set_power_state_input states;
64 
65 	PHM_FUNC_CHECK(hwmgr);
66 
67 	states.pcurrent_state = pcurrent_state;
68 	states.pnew_state = pnew_power_state;
69 
70 	if (NULL != hwmgr->hwmgr_func->power_state_set)
71 		return hwmgr->hwmgr_func->power_state_set(hwmgr, &states);
72 
73 	return 0;
74 }
75 
76 int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
77 {
78 	struct amdgpu_device *adev = NULL;
79 	int ret = -EINVAL;
80 	PHM_FUNC_CHECK(hwmgr);
81 	adev = hwmgr->adev;
82 
83 	/* Skip for suspend/resume case */
84 	if (!hwmgr->pp_one_vf && smum_is_dpm_running(hwmgr)
85 	    && !amdgpu_passthrough(adev) && adev->in_suspend
86 		&& adev->asic_type != CHIP_RAVEN) {
87 		pr_info("dpm has been enabled\n");
88 		return 0;
89 	}
90 
91 	if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
92 		ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
93 
94 	return ret;
95 }
96 
97 int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
98 {
99 	int ret = -EINVAL;
100 
101 	PHM_FUNC_CHECK(hwmgr);
102 
103 	if (!hwmgr->not_vf)
104 		return 0;
105 
106 	if (!smum_is_dpm_running(hwmgr)) {
107 		pr_info("dpm has been disabled\n");
108 		return 0;
109 	}
110 
111 	if (hwmgr->hwmgr_func->dynamic_state_management_disable)
112 		ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);
113 
114 	return ret;
115 }
116 
117 int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
118 {
119 	int ret = 0;
120 
121 	PHM_FUNC_CHECK(hwmgr);
122 
123 	if (hwmgr->hwmgr_func->force_dpm_level != NULL)
124 		ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
125 
126 	return ret;
127 }
128 
129 int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
130 				   struct pp_power_state *adjusted_ps,
131 			     const struct pp_power_state *current_ps)
132 {
133 	PHM_FUNC_CHECK(hwmgr);
134 
135 	if (hwmgr->hwmgr_func->apply_state_adjust_rules != NULL)
136 		return hwmgr->hwmgr_func->apply_state_adjust_rules(
137 									hwmgr,
138 								 adjusted_ps,
139 								 current_ps);
140 	return 0;
141 }
142 
143 int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr)
144 {
145 	PHM_FUNC_CHECK(hwmgr);
146 
147 	if (hwmgr->hwmgr_func->apply_clocks_adjust_rules != NULL)
148 		return hwmgr->hwmgr_func->apply_clocks_adjust_rules(hwmgr);
149 	return 0;
150 }
151 
152 int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
153 {
154 	PHM_FUNC_CHECK(hwmgr);
155 
156 	if (hwmgr->hwmgr_func->powerdown_uvd != NULL)
157 		return hwmgr->hwmgr_func->powerdown_uvd(hwmgr);
158 	return 0;
159 }
160 
161 
162 int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
163 {
164 	PHM_FUNC_CHECK(hwmgr);
165 
166 	if (NULL != hwmgr->hwmgr_func->disable_clock_power_gating)
167 		return hwmgr->hwmgr_func->disable_clock_power_gating(hwmgr);
168 
169 	return 0;
170 }
171 
172 int phm_pre_display_configuration_changed(struct pp_hwmgr *hwmgr)
173 {
174 	PHM_FUNC_CHECK(hwmgr);
175 
176 	if (NULL != hwmgr->hwmgr_func->pre_display_config_changed)
177 		hwmgr->hwmgr_func->pre_display_config_changed(hwmgr);
178 
179 	return 0;
180 
181 }
182 
183 int phm_display_configuration_changed(struct pp_hwmgr *hwmgr)
184 {
185 	PHM_FUNC_CHECK(hwmgr);
186 
187 	if (NULL != hwmgr->hwmgr_func->display_config_changed)
188 		hwmgr->hwmgr_func->display_config_changed(hwmgr);
189 
190 	return 0;
191 }
192 
193 int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
194 {
195 	PHM_FUNC_CHECK(hwmgr);
196 
197 	if (NULL != hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment)
198 			hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment(hwmgr);
199 
200 	return 0;
201 }
202 
203 int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr)
204 {
205 	PHM_FUNC_CHECK(hwmgr);
206 
207 	if (!hwmgr->not_vf)
208 		return 0;
209 
210 	if (hwmgr->hwmgr_func->stop_thermal_controller == NULL)
211 		return -EINVAL;
212 
213 	return hwmgr->hwmgr_func->stop_thermal_controller(hwmgr);
214 }
215 
216 int phm_register_irq_handlers(struct pp_hwmgr *hwmgr)
217 {
218 	PHM_FUNC_CHECK(hwmgr);
219 
220 	if (hwmgr->hwmgr_func->register_irq_handlers != NULL)
221 		return hwmgr->hwmgr_func->register_irq_handlers(hwmgr);
222 
223 	return 0;
224 }
225 
226 /**
227  * phm_start_thermal_controller - Initializes the thermal controller subsystem.
228  *
229  * @hwmgr:   the address of the powerplay hardware manager.
230  * Exception PP_Result_Failed if any of the paramters is NULL, otherwise the return value from the dispatcher.
231  */
232 int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
233 {
234 	int ret = 0;
235 	struct PP_TemperatureRange range = {
236 		TEMP_RANGE_MIN,
237 		TEMP_RANGE_MAX,
238 		TEMP_RANGE_MAX,
239 		TEMP_RANGE_MIN,
240 		TEMP_RANGE_MAX,
241 		TEMP_RANGE_MAX,
242 		TEMP_RANGE_MIN,
243 		TEMP_RANGE_MAX,
244 		TEMP_RANGE_MAX};
245 	struct amdgpu_device *adev = hwmgr->adev;
246 
247 	if (!hwmgr->not_vf)
248 		return 0;
249 
250 	if (hwmgr->hwmgr_func->get_thermal_temperature_range)
251 		hwmgr->hwmgr_func->get_thermal_temperature_range(
252 				hwmgr, &range);
253 
254 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
255 			PHM_PlatformCaps_ThermalController)
256 			&& hwmgr->hwmgr_func->start_thermal_controller != NULL)
257 		ret = hwmgr->hwmgr_func->start_thermal_controller(hwmgr, &range);
258 
259 	adev->pm.dpm.thermal.min_temp = range.min;
260 	adev->pm.dpm.thermal.max_temp = range.max;
261 	adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
262 	adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
263 	adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
264 	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
265 	adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
266 	adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
267 	adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
268 
269 	return ret;
270 }
271 
272 
273 bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
274 {
275 	if (hwmgr == NULL ||
276 	    hwmgr->hwmgr_func == NULL)
277 		return false;
278 
279 	if (hwmgr->pp_one_vf)
280 		return false;
281 
282 	if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL)
283 		return false;
284 
285 	return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr);
286 }
287 
288 
289 int phm_check_states_equal(struct pp_hwmgr *hwmgr,
290 				 const struct pp_hw_power_state *pstate1,
291 				 const struct pp_hw_power_state *pstate2,
292 				 bool *equal)
293 {
294 	PHM_FUNC_CHECK(hwmgr);
295 
296 	if (hwmgr->hwmgr_func->check_states_equal == NULL)
297 		return -EINVAL;
298 
299 	return hwmgr->hwmgr_func->check_states_equal(hwmgr, pstate1, pstate2, equal);
300 }
301 
302 int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
303 		    const struct amd_pp_display_configuration *display_config)
304 {
305 	int index = 0;
306 	int number_of_active_display = 0;
307 
308 	PHM_FUNC_CHECK(hwmgr);
309 
310 	if (display_config == NULL)
311 		return -EINVAL;
312 
313 	if (NULL != hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
314 		hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);
315 
316 	for (index = 0; index < display_config->num_path_including_non_display; index++) {
317 		if (display_config->displays[index].controller_id != 0)
318 			number_of_active_display++;
319 	}
320 
321 	if (NULL != hwmgr->hwmgr_func->set_active_display_count)
322 		hwmgr->hwmgr_func->set_active_display_count(hwmgr, number_of_active_display);
323 
324 	if (hwmgr->hwmgr_func->store_cc6_data == NULL)
325 		return -EINVAL;
326 
327 	/* TODO: pass other display configuration in the future */
328 
329 	if (hwmgr->hwmgr_func->store_cc6_data)
330 		hwmgr->hwmgr_func->store_cc6_data(hwmgr,
331 				display_config->cpu_pstate_separation_time,
332 				display_config->cpu_cc6_disable,
333 				display_config->cpu_pstate_disable,
334 				display_config->nb_pstate_switch_disable);
335 
336 	return 0;
337 }
338 
339 int phm_get_dal_power_level(struct pp_hwmgr *hwmgr,
340 		struct amd_pp_simple_clock_info *info)
341 {
342 	PHM_FUNC_CHECK(hwmgr);
343 
344 	if (info == NULL || hwmgr->hwmgr_func->get_dal_power_level == NULL)
345 		return -EINVAL;
346 	return hwmgr->hwmgr_func->get_dal_power_level(hwmgr, info);
347 }
348 
349 int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr)
350 {
351 	PHM_FUNC_CHECK(hwmgr);
352 
353 	if (hwmgr->hwmgr_func->set_cpu_power_state != NULL)
354 		return hwmgr->hwmgr_func->set_cpu_power_state(hwmgr);
355 
356 	return 0;
357 }
358 
359 
360 int phm_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
361 				PHM_PerformanceLevelDesignation designation, uint32_t index,
362 				PHM_PerformanceLevel *level)
363 {
364 	PHM_FUNC_CHECK(hwmgr);
365 	if (hwmgr->hwmgr_func->get_performance_level == NULL)
366 		return -EINVAL;
367 
368 	return hwmgr->hwmgr_func->get_performance_level(hwmgr, state, designation, index, level);
369 
370 
371 }
372 
373 
374 /**
375  * phm_get_clock_info
376  *
377  * @hwmgr:  the address of the powerplay hardware manager.
378  * @state: the address of the Power State structure.
379  * @pclock_info: the address of PP_ClockInfo structure where the result will be returned.
380  * @designation: PHM performance level designation
381  * Exception PP_Result_Failed if any of the paramters is NULL, otherwise the return value from the back-end.
382  */
383 int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *pclock_info,
384 			PHM_PerformanceLevelDesignation designation)
385 {
386 	int result;
387 	PHM_PerformanceLevel performance_level = {0};
388 
389 	PHM_FUNC_CHECK(hwmgr);
390 
391 	PP_ASSERT_WITH_CODE((NULL != state), "Invalid Input!", return -EINVAL);
392 	PP_ASSERT_WITH_CODE((NULL != pclock_info), "Invalid Input!", return -EINVAL);
393 
394 	result = phm_get_performance_level(hwmgr, state, PHM_PerformanceLevelDesignation_Activity, 0, &performance_level);
395 
396 	PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve minimum clocks.", return result);
397 
398 
399 	pclock_info->min_mem_clk = performance_level.memory_clock;
400 	pclock_info->min_eng_clk = performance_level.coreClock;
401 	pclock_info->min_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth;
402 
403 
404 	result = phm_get_performance_level(hwmgr, state, designation,
405 					(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1), &performance_level);
406 
407 	PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve maximum clocks.", return result);
408 
409 	pclock_info->max_mem_clk = performance_level.memory_clock;
410 	pclock_info->max_eng_clk = performance_level.coreClock;
411 	pclock_info->max_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth;
412 
413 	return 0;
414 }
415 
416 int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
417 {
418 	PHM_FUNC_CHECK(hwmgr);
419 
420 	if (hwmgr->hwmgr_func->get_current_shallow_sleep_clocks == NULL)
421 		return -EINVAL;
422 
423 	return hwmgr->hwmgr_func->get_current_shallow_sleep_clocks(hwmgr, state, clock_info);
424 
425 }
426 
427 int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
428 {
429 	PHM_FUNC_CHECK(hwmgr);
430 
431 	if (hwmgr->hwmgr_func->get_clock_by_type == NULL)
432 		return -EINVAL;
433 
434 	return hwmgr->hwmgr_func->get_clock_by_type(hwmgr, type, clocks);
435 
436 }
437 
438 int phm_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
439 		enum amd_pp_clock_type type,
440 		struct pp_clock_levels_with_latency *clocks)
441 {
442 	PHM_FUNC_CHECK(hwmgr);
443 
444 	if (hwmgr->hwmgr_func->get_clock_by_type_with_latency == NULL)
445 		return -EINVAL;
446 
447 	return hwmgr->hwmgr_func->get_clock_by_type_with_latency(hwmgr, type, clocks);
448 
449 }
450 
451 int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
452 		enum amd_pp_clock_type type,
453 		struct pp_clock_levels_with_voltage *clocks)
454 {
455 	PHM_FUNC_CHECK(hwmgr);
456 
457 	if (hwmgr->hwmgr_func->get_clock_by_type_with_voltage == NULL)
458 		return -EINVAL;
459 
460 	return hwmgr->hwmgr_func->get_clock_by_type_with_voltage(hwmgr, type, clocks);
461 
462 }
463 
464 int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
465 					void *clock_ranges)
466 {
467 	PHM_FUNC_CHECK(hwmgr);
468 
469 	if (!hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges)
470 		return -EINVAL;
471 
472 	return hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr,
473 								clock_ranges);
474 }
475 
476 int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
477 		struct pp_display_clock_request *clock)
478 {
479 	PHM_FUNC_CHECK(hwmgr);
480 
481 	if (!hwmgr->hwmgr_func->display_clock_voltage_request)
482 		return -EINVAL;
483 
484 	return hwmgr->hwmgr_func->display_clock_voltage_request(hwmgr, clock);
485 }
486 
487 int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
488 {
489 	PHM_FUNC_CHECK(hwmgr);
490 
491 	if (hwmgr->hwmgr_func->get_max_high_clocks == NULL)
492 		return -EINVAL;
493 
494 	return hwmgr->hwmgr_func->get_max_high_clocks(hwmgr, clocks);
495 }
496 
497 int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
498 {
499 	PHM_FUNC_CHECK(hwmgr);
500 
501 	if (!hwmgr->not_vf)
502 		return 0;
503 
504 	if (hwmgr->hwmgr_func->disable_smc_firmware_ctf == NULL)
505 		return -EINVAL;
506 
507 	return hwmgr->hwmgr_func->disable_smc_firmware_ctf(hwmgr);
508 }
509 
510 int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
511 {
512 	PHM_FUNC_CHECK(hwmgr);
513 
514 	if (!hwmgr->hwmgr_func->set_active_display_count)
515 		return -EINVAL;
516 
517 	return hwmgr->hwmgr_func->set_active_display_count(hwmgr, count);
518 }
519