/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "core_types.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
#include "dm_helpers.h"
#include "dcn35_smu.h"

#include "mp/mp_14_0_0_offset.h"
#include "mp/mp_14_0_0_sh_mask.h"

/* TODO: Use the real headers when they're correct */
#define MP1_BASE__INST0_SEG0                       0x00016000
#define MP1_BASE__INST0_SEG1                       0x0243FC00
#define MP1_BASE__INST0_SEG2                       0x00DC0000
#define MP1_BASE__INST0_SEG3                       0x00E00000
#define MP1_BASE__INST0_SEG4                       0x00E40000
#define MP1_BASE__INST0_SEG5                       0

#ifdef BASE_INNER
#undef BASE_INNER
#endif

#define BASE_INNER(seg) MP1_BASE__INST0_SEG ## seg

#define BASE(seg) BASE_INNER(seg)

#define REG(reg_name) (BASE(reg##reg_name##_BASE_IDX) + reg##reg_name)

#define FN(reg_name, field) \
	FD(reg_name##__##field)

#include "logger_types.h"
#undef DC_LOGGER
#define DC_LOGGER \
	CTX->logger
#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }

#define VBIOSSMC_MSG_TestMessage                  0x1
#define VBIOSSMC_MSG_GetSmuVersion                0x2
#define VBIOSSMC_MSG_PowerUpGfx                   0x3
#define VBIOSSMC_MSG_SetDispclkFreq               0x4
#define VBIOSSMC_MSG_SetDprefclkFreq              0x5   //Not used. DPRef is constant
#define VBIOSSMC_MSG_SetDppclkFreq                0x6
#define VBIOSSMC_MSG_SetHardMinDcfclkByFreq       0x7
#define VBIOSSMC_MSG_SetMinDeepSleepDcfclk        0x8
#define VBIOSSMC_MSG_SetPhyclkVoltageByFreq       0x9   //Keep it in case VMIN does not support phy clk
#define VBIOSSMC_MSG_GetFclkFrequency             0xA
#define VBIOSSMC_MSG_SetDisplayCount              0xB   //Not used anymore
#define VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown 0xC   //Ask PMFW to turn off the TMDP 48MHz refclk during display off to save power
#define VBIOSSMC_MSG_UpdatePmeRestore             0xD
#define VBIOSSMC_MSG_SetVbiosDramAddrHigh         0xE   //Used for WM table txfr
#define VBIOSSMC_MSG_SetVbiosDramAddrLow          0xF
#define VBIOSSMC_MSG_TransferTableSmu2Dram        0x10
#define VBIOSSMC_MSG_TransferTableDram2Smu        0x11
#define VBIOSSMC_MSG_SetDisplayIdleOptimizations  0x12
#define VBIOSSMC_MSG_GetDprefclkFreq              0x13
#define VBIOSSMC_MSG_GetDtbclkFreq                0x14
#define VBIOSSMC_MSG_AllowZstatesEntry            0x15
#define VBIOSSMC_MSG_DisallowZstatesEntry         0x16
#define VBIOSSMC_MSG_SetDtbClk                    0x17
#define VBIOSSMC_MSG_DispPsrEntry                 0x18 ///< Display PSR entry, DMU
#define VBIOSSMC_MSG_DispPsrExit                  0x19 ///< Display PSR exit, DMU
#define VBIOSSMC_MSG_DisableLSdma                 0x1A ///< Disable LSDMA; only sent by VBIOS
#define VBIOSSMC_MSG_DpControllerPhyStatus        0x1B ///< Inform PMFW about the preconditions for turning SLDO2 on/off; bit[0]==1 means the precondition is met, bits[1:2] hold the DPPHY number
#define VBIOSSMC_MSG_QueryIPS2Support             0x1C ///< Return 1: supported; otherwise not supported
#define VBIOSSMC_MSG_NotifyHostRouterBW           0x1D
#define VBIOSSMC_Message_Count                    0x1E

#define VBIOSSMC_Status_BUSY                      0x0
#define VBIOSSMC_Result_OK                        0x1
#define VBIOSSMC_Result_Failed                    0xFF
#define VBIOSSMC_Result_UnknownCmd                0xFE
#define VBIOSSMC_Result_CmdRejectedPrereq         0xFD
#define VBIOSSMC_Result_CmdRejectedBusy           0xFC

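/*
 * Parameter layout for VBIOSSMC_MSG_NotifyHostRouterBW: the host router id and
 * its bandwidth in Mbps are packed into the single 32-bit message argument.
 */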
union dcn35_dpia_host_router_bw {
	struct {
		uint32_t hr_id : 16;
		uint32_t bw_mbps : 16;
	} bits;
	uint32_t all;
};

/*
 * Function to be used instead of REG_WAIT macro because the wait ends when
 * the register is NOT EQUAL to zero, and because the translation in msg_if.h
 * won't work with REG_WAIT.
 */
static uint32_t dcn35_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
{
	uint32_t res_val = VBIOSSMC_Status_BUSY;

	do {
		res_val = REG_READ(MP1_SMN_C2PMSG_91);
		if (res_val != VBIOSSMC_Status_BUSY)
			break;

		if (delay_us >= 1000)
			msleep(delay_us/1000);
		else if (delay_us > 0)
			udelay(delay_us);

		if (clk_mgr->base.ctx->dc->debug.disable_timeout)
			max_retries++;
	} while (max_retries--);

	return res_val;
}

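/*
 * Send one message to the PMFW mailbox: wait for any previous transaction to
 * finish, clear the response register (MP1_SMN_C2PMSG_91), write the parameter
 * to MP1_SMN_C2PMSG_83, then trigger the transaction by writing the message id
 * to MP1_SMN_C2PMSG_67. The response payload is read back from
 * MP1_SMN_C2PMSG_83; -1 is returned if the SMU stays busy or reports failure.
 */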
static int dcn35_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
					 unsigned int msg_id,
					 unsigned int param)
{
	uint32_t result;

	result = dcn35_smu_wait_for_response(clk_mgr, 10, 2000000);
	ASSERT(result == VBIOSSMC_Result_OK);

	if (result != VBIOSSMC_Result_OK) {
		DC_LOG_WARNING("SMU response after wait: %d, msg id = %d\n", result, msg_id);

		if (result == VBIOSSMC_Status_BUSY)
			return -1;
	}

	/* First clear response register */
	REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY);

	/* Set the parameter register for the SMU message, unit is MHz */
	REG_WRITE(MP1_SMN_C2PMSG_83, param);

	/* Trigger the message transaction by writing the message ID */
	REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);

	result = dcn35_smu_wait_for_response(clk_mgr, 10, 2000000);

	if (result == VBIOSSMC_Result_Failed) {
		if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
		    param == TABLE_WATERMARKS)
			DC_LOG_WARNING("Watermarks table not configured properly by SMU");
		else
			ASSERT(0);
		REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
		DC_LOG_WARNING("SMU response after wait: %d, msg id = %d\n", result, msg_id);
		return -1;
	}

	if (IS_SMU_TIMEOUT(result)) {
		ASSERT(0);
		result = dcn35_smu_wait_for_response(clk_mgr, 10, 2000000);
		//dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000);
		DC_LOG_WARNING("SMU response after wait: %d, msg id = %d\n", result, msg_id);
	}

	return REG_READ(MP1_SMN_C2PMSG_83);
}

int dcn35_smu_get_smu_version(struct clk_mgr_internal *clk_mgr)
{
	return dcn35_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_GetSmuVersion,
			0);
}

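/*
 * The SMU clock-set messages take and return values in MHz, while DC tracks
 * clocks in kHz; hence the khz_to_mhz_ceil() on the way in and the * 1000 on
 * the returned value in the helpers below.
 */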
int dcn35_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz)
{
	int actual_dispclk_set_mhz = -1;

	if (!clk_mgr->smu_present)
		return requested_dispclk_khz;

	/* Unit of SMU msg parameter is MHz */
	actual_dispclk_set_mhz = dcn35_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetDispclkFreq,
			khz_to_mhz_ceil(requested_dispclk_khz));

	smu_print("requested_dispclk_khz = %d, actual_dispclk_set_mhz: %d\n", requested_dispclk_khz, actual_dispclk_set_mhz);
	return actual_dispclk_set_mhz * 1000;
}

int dcn35_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
{
	int actual_dprefclk_set_mhz = -1;

	if (!clk_mgr->smu_present)
		return clk_mgr->base.dprefclk_khz;

	actual_dprefclk_set_mhz = dcn35_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetDprefclkFreq,
			khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));

	/* TODO: add code for programming the DP DTO; currently this is done by the command table */

	return actual_dprefclk_set_mhz * 1000;
}

int dcn35_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz)
{
	int actual_dcfclk_set_mhz = -1;

	if (!clk_mgr->smu_present)
		return requested_dcfclk_khz;

	actual_dcfclk_set_mhz = dcn35_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
			khz_to_mhz_ceil(requested_dcfclk_khz));

	smu_print("requested_dcfclk_khz = %d, actual_dcfclk_set_mhz: %d\n", requested_dcfclk_khz, actual_dcfclk_set_mhz);

	return actual_dcfclk_set_mhz * 1000;
}

int dcn35_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz)
{
	int actual_min_ds_dcfclk_mhz = -1;

	if (!clk_mgr->smu_present)
		return requested_min_ds_dcfclk_khz;

	actual_min_ds_dcfclk_mhz = dcn35_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetMinDeepSleepDcfclk,
			khz_to_mhz_ceil(requested_min_ds_dcfclk_khz));

	smu_print("requested_min_ds_dcfclk_khz = %d, actual_min_ds_dcfclk_mhz: %d\n", requested_min_ds_dcfclk_khz, actual_min_ds_dcfclk_mhz);

	return actual_min_ds_dcfclk_mhz * 1000;
}

int dcn35_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz)
{
	int actual_dppclk_set_mhz = -1;

	if (!clk_mgr->smu_present)
		return requested_dpp_khz;

	actual_dppclk_set_mhz = dcn35_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetDppclkFreq,
			khz_to_mhz_ceil(requested_dpp_khz));

	smu_print("requested_dpp_khz = %d, actual_dppclk_set_mhz: %d\n", requested_dpp_khz, actual_dppclk_set_mhz);

	return actual_dppclk_set_mhz * 1000;
}

void dcn35_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, uint32_t idle_info)
{
	if (!clk_mgr->base.ctx->dc->debug.pstate_enabled)
		return;

	if (!clk_mgr->smu_present)
		return;

	//TODO: Work with smu team to define optimization options.
	dcn35_smu_send_msg_with_param(
		clk_mgr,
		VBIOSSMC_MSG_SetDisplayIdleOptimizations,
		idle_info);
	smu_print("%s: VBIOSSMC_MSG_SetDisplayIdleOptimizations idle_info  = %x\n", __func__, idle_info);
}

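/*
 * When enabled, request the display idle optimizations that disable DF
 * requests and power down the PHY reference clock; when disabled, clear the
 * idle optimization mask.
 */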
void dcn35_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
{
	union display_idle_optimization_u idle_info = { 0 };

	if (!clk_mgr->smu_present)
		return;

	if (enable) {
		idle_info.idle_info.df_request_disabled = 1;
		idle_info.idle_info.phy_ref_clk_off = 1;
	}

	dcn35_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetDisplayIdleOptimizations,
			idle_info.data);
	smu_print("%s smu_enable_phy_refclk_pwrdwn  = %d\n", __func__, enable ? 1 : 0);
}

void dcn35_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
{
	if (!clk_mgr->smu_present)
		return;

	dcn35_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_UpdatePmeRestore,
			0);
	smu_print("%s: SMC_MSG_UpdatePmeRestore\n", __func__);
}

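/*
 * The high/low DRAM address pair below tells the PMFW where the driver-side
 * buffer used for table transfers (e.g. the watermarks table) lives in memory.
 */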
void dcn35_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
{
	if (!clk_mgr->smu_present)
		return;

	dcn35_smu_send_msg_with_param(clk_mgr,
			VBIOSSMC_MSG_SetVbiosDramAddrHigh, addr_high);
}

void dcn35_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low)
{
	if (!clk_mgr->smu_present)
		return;

	dcn35_smu_send_msg_with_param(clk_mgr,
			VBIOSSMC_MSG_SetVbiosDramAddrLow, addr_low);
}

void dcn35_smu_transfer_dpm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr)
{
	if (!clk_mgr->smu_present)
		return;

	dcn35_smu_send_msg_with_param(clk_mgr,
			VBIOSSMC_MSG_TransferTableSmu2Dram, TABLE_DPMCLOCKS);
}

void dcn35_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
{
	if (!clk_mgr->smu_present)
		return;

	dcn35_smu_send_msg_with_param(clk_mgr,
			VBIOSSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS);
}

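/*
 * Map the requested Z-state support level onto the AllowZstatesEntry
 * parameter: bit 8 is set when Z8 is allowed, bit 10 when Z10 is allowed, and
 * the unrestricted ALLOW case additionally sets bit 9.
 */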
void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support)
{
	unsigned int msg_id, param, retv;

	if (!clk_mgr->smu_present)
		return;

	switch (support) {

	case DCN_ZSTATE_SUPPORT_ALLOW:
		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
		param = (1 << 10) | (1 << 9) | (1 << 8);
		smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW, param = 0x%x\n", __func__, param);
		break;

	case DCN_ZSTATE_SUPPORT_DISALLOW:
		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
		param = 0;
		smu_print("%s: SMC_MSG_AllowZstatesEntry msg_id = DISALLOW, param = 0x%x\n",  __func__, param);
		break;

	case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY:
		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
		param = (1 << 10);
		smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z10_ONLY, param = 0x%x\n", __func__, param);
		break;

	case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY:
		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
		param = (1 << 10) | (1 << 8);
		smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_Z10_ONLY, param = 0x%x\n", __func__, param);
		break;

	case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY:
		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
		param = (1 << 8);
		smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = 0x%x\n", __func__, param);
		break;

	default: //DCN_ZSTATE_SUPPORT_UNKNOWN
		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
		param = 0;
		break;
	}

	retv = dcn35_smu_send_msg_with_param(
		clk_mgr,
		msg_id,
		param);
	smu_print("%s:  msg_id = %d, param = 0x%x, return = 0x%x\n", __func__, msg_id, param, retv);
}

int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr)
{
	int dprefclk;

	if (!clk_mgr->smu_present)
		return 0;

	dprefclk = dcn35_smu_send_msg_with_param(clk_mgr,
						 VBIOSSMC_MSG_GetDprefclkFreq,
						 0);

	smu_print("%s:  SMU DPREF clk  = %d mhz\n",  __func__, dprefclk);
	return dprefclk * 1000;
}

int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr)
{
	int dtbclk;

	if (!clk_mgr->smu_present)
		return 0;

	dtbclk = dcn35_smu_send_msg_with_param(clk_mgr,
					       VBIOSSMC_MSG_GetDtbclkFreq,
					       0);

	smu_print("%s: get_dtbclk  = %dmhz\n", __func__, dtbclk);
	return dtbclk * 1000;
}
/* Arg = 1: turn the DTB clock on; 0: turn the DTB clock off. When on, it runs at 600 MHz. */
void dcn35_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
{
	if (!clk_mgr->smu_present)
		return;

	dcn35_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetDtbClk,
			enable);
	smu_print("%s: smu_set_dtbclk = %d\n", __func__, enable ? 1 : 0);
}

void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
{
	if (!clk_mgr->smu_present)
		return;

	dcn35_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown,
			enable);
	smu_print("%s: smu_enable_48mhz_tmdp_refclk_pwrdwn = %d\n", __func__, enable ? 1 : 0);
}

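/* Request that the PMFW exit the display low power state (sent as a DispPsrExit message). */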
int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr)
{
	int retv;

	if (!clk_mgr->smu_present)
		return 0;

	retv = dcn35_smu_send_msg_with_param(
		clk_mgr,
		VBIOSSMC_MSG_DispPsrExit,
		0);
	smu_print("%s: smu_exit_low_power_state return = %d\n", __func__, retv);
	return retv;
}

int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr)
{
	int retv;

	if (!clk_mgr->smu_present)
		return 0;

	retv = dcn35_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_QueryIPS2Support,
			0);

	//smu_print("%s: VBIOSSMC_MSG_QueryIPS2Support return = %x\n", __func__, retv);
	return retv;
}

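/*
 * Report the bandwidth allocated to a DPIA host router: the caller passes the
 * bandwidth in kbps, which is converted to Mbps and packed together with the
 * host router id into a single message parameter.
 */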
void dcn35_smu_notify_host_router_bw(struct clk_mgr_internal *clk_mgr, uint32_t hr_id, uint32_t bw_kbps)
{
	union dcn35_dpia_host_router_bw msg_data = { 0 };

	msg_data.bits.hr_id = hr_id;
	msg_data.bits.bw_mbps = bw_kbps / 1000;

	dcn35_smu_send_msg_with_param(clk_mgr, VBIOSSMC_MSG_NotifyHostRouterBW, msg_data.all);
}