xref: /linux/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c (revision e332935a540eb76dd656663ca908eb0544d96757)
1 /*
2  * Copyright 2020 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 
27 #include "dm_services.h"
28 #include "dm_helpers.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "dcn30_hwseq.h"
32 #include "dccg.h"
33 #include "dce/dce_hwseq.h"
34 #include "dcn30/dcn30_mpc.h"
35 #include "dcn30/dcn30_dpp.h"
36 #include "dcn10/dcn10_cm_common.h"
37 #include "dcn30/dcn30_cm_common.h"
38 #include "reg_helper.h"
39 #include "abm.h"
40 #include "clk_mgr.h"
41 #include "hubp.h"
42 #include "dchubbub.h"
43 #include "timing_generator.h"
44 #include "opp.h"
45 #include "ipp.h"
46 #include "mpc.h"
47 #include "mcif_wb.h"
48 #include "dc_dmub_srv.h"
49 #include "link_hwss.h"
50 #include "dpcd_defs.h"
51 #include "dcn20/dcn20_hwseq.h"
52 #include "dcn30/dcn30_resource.h"
53 #include "link.h"
54 #include "dc_state_priv.h"
55 
56 
57 
58 #define DC_LOGGER_INIT(logger)
59 
60 #define CTX \
61 	hws->ctx
62 #define REG(reg)\
63 	hws->regs->reg
64 #define DC_LOGGER \
65 		dc->ctx->logger
66 
67 
68 #undef FN
69 #define FN(reg_name, field_name) \
70 	hws->shifts->field_name, hws->masks->field_name
71 
72 void dcn30_log_color_state(struct dc *dc,
73 			   struct dc_log_buffer_ctx *log_ctx)
74 {
75 	struct dc_context *dc_ctx = dc->ctx;
76 	struct resource_pool *pool = dc->res_pool;
77 	bool is_gamut_remap_available = false;
78 	int i;
79 
80 	DTN_INFO("DPP:  DGAM ROM  DGAM ROM type  DGAM LUT  SHAPER mode"
81 		 "  3DLUT mode  3DLUT bit depth  3DLUT size  RGAM mode"
82 		 "  GAMUT adjust  "
83 		 "C11        C12        C13        C14        "
84 		 "C21        C22        C23        C24        "
85 		 "C31        C32        C33        C34        \n");
86 
87 	for (i = 0; i < pool->pipe_count; i++) {
88 		struct dpp *dpp = pool->dpps[i];
89 		struct dcn_dpp_state s = {0};
90 
91 		dpp->funcs->dpp_read_state(dpp, &s);
92 
93 		if (dpp->funcs->dpp_get_gamut_remap) {
94 			dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
95 			is_gamut_remap_available = true;
96 		}
97 
98 		if (!s.is_enabled)
99 			continue;
100 
101 		DTN_INFO("[%2d]:  %7x  %13s  %8s  %11s  %10s  %15s  %10s  %9s",
102 			dpp->inst,
103 			s.pre_dgam_mode,
104 			(s.pre_dgam_select == 0) ? "sRGB" :
105 			 ((s.pre_dgam_select == 1) ? "Gamma 2.2" :
106 			 ((s.pre_dgam_select == 2) ? "Gamma 2.4" :
107 			 ((s.pre_dgam_select == 3) ? "Gamma 2.6" :
108 			 ((s.pre_dgam_select == 4) ? "BT.709" :
109 			 ((s.pre_dgam_select == 5) ? "PQ" :
110 			 ((s.pre_dgam_select == 6) ? "HLG" :
111 						     "Unknown")))))),
112 			(s.gamcor_mode == 0) ? "Bypass" :
113 			 ((s.gamcor_mode == 1) ? "RAM A" :
114 						 "RAM B"),
115 			(s.shaper_lut_mode == 1) ? "RAM A" :
116 			 ((s.shaper_lut_mode == 2) ? "RAM B" :
117 						     "Bypass"),
118 			(s.lut3d_mode == 1) ? "RAM A" :
119 			 ((s.lut3d_mode == 2) ? "RAM B" :
120 						"Bypass"),
121 			(s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit",
122 			(s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
123 			(s.rgam_lut_mode == 0) ? "Bypass" :
124 			 ((s.rgam_lut_mode == 1) ? "RAM A" :
125 						   "RAM B"));
126 
127 		if (is_gamut_remap_available) {
128 			DTN_INFO("  %12s  "
129 				 "%010lld %010lld %010lld %010lld "
130 				 "%010lld %010lld %010lld %010lld "
131 				 "%010lld %010lld %010lld %010lld",
132 
133 			(s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
134 				((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
135 									  "SW"),
136 			s.gamut_remap.temperature_matrix[0].value,
137 			s.gamut_remap.temperature_matrix[1].value,
138 			s.gamut_remap.temperature_matrix[2].value,
139 			s.gamut_remap.temperature_matrix[3].value,
140 			s.gamut_remap.temperature_matrix[4].value,
141 			s.gamut_remap.temperature_matrix[5].value,
142 			s.gamut_remap.temperature_matrix[6].value,
143 			s.gamut_remap.temperature_matrix[7].value,
144 			s.gamut_remap.temperature_matrix[8].value,
145 			s.gamut_remap.temperature_matrix[9].value,
146 			s.gamut_remap.temperature_matrix[10].value,
147 			s.gamut_remap.temperature_matrix[11].value);
148 		}
149 
150 		DTN_INFO("\n");
151 	}
152 	DTN_INFO("\n");
153 	DTN_INFO("DPP Color Caps: input_lut_shared:%d  icsc:%d"
154 		 "  dgam_ram:%d  dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d"
155 		 "  post_csc:%d  gamcor:%d  dgam_rom_for_yuv:%d  3d_lut:%d"
156 		 "  blnd_lut:%d  ocsc:%d\n\n",
157 		 dc->caps.color.dpp.input_lut_shared,
158 		 dc->caps.color.dpp.icsc,
159 		 dc->caps.color.dpp.dgam_ram,
160 		 dc->caps.color.dpp.dgam_rom_caps.srgb,
161 		 dc->caps.color.dpp.dgam_rom_caps.bt2020,
162 		 dc->caps.color.dpp.dgam_rom_caps.gamma2_2,
163 		 dc->caps.color.dpp.dgam_rom_caps.pq,
164 		 dc->caps.color.dpp.dgam_rom_caps.hlg,
165 		 dc->caps.color.dpp.post_csc,
166 		 dc->caps.color.dpp.gamma_corr,
167 		 dc->caps.color.dpp.dgam_rom_for_yuv,
168 		 dc->caps.color.dpp.hw_3d_lut,
169 		 dc->caps.color.dpp.ogam_ram,
170 		 dc->caps.color.dpp.ocsc);
171 
172 	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE"
173 		 "  SHAPER mode  3DLUT mode  3DLUT bit-depth  3DLUT size  OGAM mode  OGAM LUT"
174 		 "  GAMUT adjust  "
175 		 "C11        C12        C13        C14        "
176 		 "C21        C22        C23        C24        "
177 		 "C31        C32        C33        C34        \n");
178 
179 	for (i = 0; i < pool->mpcc_count; i++) {
180 		struct mpcc_state s = {0};
181 
182 		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
183 		mpc3_get_gamut_remap(pool->mpc, i,  &s.gamut_remap);
184 
185 		if (s.opp_id != 0xf)
186 			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d  %11s %11s %16s %11s %10s %9s"
187 				 "  %-12s  "
188 				 "%010lld %010lld %010lld %010lld "
189 				 "%010lld %010lld %010lld %010lld "
190 				 "%010lld %010lld %010lld %010lld\n",
191 				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
192 				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
193 				s.idle,
194 				(s.shaper_lut_mode == 1) ? "RAM A" :
195 				 ((s.shaper_lut_mode == 2) ? "RAM B" :
196 							     "Bypass"),
197 				(s.lut3d_mode == 1) ? "RAM A" :
198 				 ((s.lut3d_mode == 2) ? "RAM B" :
199 							"Bypass"),
200 				(s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit",
201 				(s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
202 				(s.rgam_mode == 0) ? "Bypass" :
203 				 ((s.rgam_mode == 2) ? "RAM" :
204 						       "Unknown"),
205 				(s.rgam_mode == 1) ? "B" : "A",
206 				(s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
207 					((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
208 										  "SW"),
209 				s.gamut_remap.temperature_matrix[0].value,
210 				s.gamut_remap.temperature_matrix[1].value,
211 				s.gamut_remap.temperature_matrix[2].value,
212 				s.gamut_remap.temperature_matrix[3].value,
213 				s.gamut_remap.temperature_matrix[4].value,
214 				s.gamut_remap.temperature_matrix[5].value,
215 				s.gamut_remap.temperature_matrix[6].value,
216 				s.gamut_remap.temperature_matrix[7].value,
217 				s.gamut_remap.temperature_matrix[8].value,
218 				s.gamut_remap.temperature_matrix[9].value,
219 				s.gamut_remap.temperature_matrix[10].value,
220 				s.gamut_remap.temperature_matrix[11].value);
221 
222 	}
223 	DTN_INFO("\n");
224 	DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n",
225 		 dc->caps.color.mpc.gamut_remap,
226 		 dc->caps.color.mpc.num_3dluts,
227 		 dc->caps.color.mpc.ogam_ram,
228 		 dc->caps.color.mpc.ocsc);
229 }
230 
231 bool dcn30_set_blend_lut(
232 	struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
233 {
234 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
235 	bool result = true;
236 	const struct pwl_params *blend_lut = NULL;
237 
238 	if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
239 		blend_lut = &plane_state->blend_tf.pwl;
240 	else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
241 		result = cm3_helper_translate_curve_to_hw_format(
242 				&plane_state->blend_tf, &dpp_base->regamma_params, false);
243 		if (!result)
244 			return result;
245 
246 		blend_lut = &dpp_base->regamma_params;
247 	}
248 	result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut);
249 
250 	return result;
251 }
252 
253 static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx,
254 				       const struct dc_stream_state *stream)
255 {
256 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
257 	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
258 	struct dc *dc = pipe_ctx->stream->ctx->dc;
259 	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
260 	bool result = false;
261 	int acquired_rmu = 0;
262 	int mpcc_id_projected = 0;
263 
264 	const struct pwl_params *shaper_lut = NULL;
265 	//get the shaper lut params
266 	if (stream->func_shaper) {
267 		if (stream->func_shaper->type == TF_TYPE_HWPWL) {
268 			shaper_lut = &stream->func_shaper->pwl;
269 		} else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
270 			cm_helper_translate_curve_to_hw_format(stream->ctx, stream->func_shaper,
271 							       &dpp_base->shaper_params, true);
272 			shaper_lut = &dpp_base->shaper_params;
273 		}
274 	}
275 
276 	if (stream->lut3d_func &&
277 	    stream->lut3d_func->state.bits.initialized == 1 &&
278 	    stream->lut3d_func->state.bits.rmu_idx_valid == 1) {
279 		if (stream->lut3d_func->state.bits.rmu_mux_num == 0)
280 			mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu0_mux;
281 		else if (stream->lut3d_func->state.bits.rmu_mux_num == 1)
282 			mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu1_mux;
283 		else if (stream->lut3d_func->state.bits.rmu_mux_num == 2)
284 			mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu2_mux;
285 		if (mpcc_id_projected != mpcc_id)
286 			BREAK_TO_DEBUGGER();
287 		/* find out why the logical layer assigned a different
288 		 * mpcc_id in acquire_post_bldn_3dlut
289 		 */
290 		acquired_rmu = mpc->funcs->acquire_rmu(mpc, mpcc_id,
291 						       stream->lut3d_func->state.bits.rmu_mux_num);
292 		if (acquired_rmu != stream->lut3d_func->state.bits.rmu_mux_num)
293 			BREAK_TO_DEBUGGER();
294 
295 		result = mpc->funcs->program_3dlut(mpc, &stream->lut3d_func->lut_3d,
296 						   stream->lut3d_func->state.bits.rmu_mux_num);
297 		if (!result)
298 			DC_LOG_ERROR("%s: program_3dlut failed\n", __func__);
299 
300 		result = mpc->funcs->program_shaper(mpc, shaper_lut,
301 						    stream->lut3d_func->state.bits.rmu_mux_num);
302 		if (!result)
303 			DC_LOG_ERROR("%s: program_shaper failed\n", __func__);
304 
305 	} else {
306 		// loop through the available mux and release the requested mpcc_id
307 		mpc->funcs->release_rmu(mpc, mpcc_id);
308 	}
309 
310 	return result;
311 }
312 
313 bool dcn30_set_input_transfer_func(struct dc *dc,
314 				struct pipe_ctx *pipe_ctx,
315 				const struct dc_plane_state *plane_state)
316 {
317 	struct dce_hwseq *hws = dc->hwseq;
318 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
319 	enum dc_transfer_func_predefined tf;
320 	bool result = true;
321 	const struct pwl_params *params = NULL;
322 
323 	if (dpp_base == NULL || plane_state == NULL)
324 		return false;
325 
326 	tf = TRANSFER_FUNCTION_UNITY;
327 
328 	if (plane_state->in_transfer_func.type == TF_TYPE_PREDEFINED)
329 		tf = plane_state->in_transfer_func.tf;
330 
331 	dpp_base->funcs->dpp_set_pre_degam(dpp_base, tf);
332 
333 	if (plane_state->in_transfer_func.type == TF_TYPE_HWPWL)
334 		params = &plane_state->in_transfer_func.pwl;
335 	else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS &&
336 		cm3_helper_translate_curve_to_hw_format(&plane_state->in_transfer_func,
337 				&dpp_base->degamma_params, false))
338 		params = &dpp_base->degamma_params;
339 
340 	result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
341 
342 	if (pipe_ctx->stream_res.opp && pipe_ctx->stream_res.opp->ctx) {
343 		if (dpp_base->funcs->dpp_program_blnd_lut)
344 			hws->funcs.set_blend_lut(pipe_ctx, plane_state);
345 		if (dpp_base->funcs->dpp_program_shaper_lut &&
346 				dpp_base->funcs->dpp_program_3dlut)
347 			hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state);
348 	}
349 
350 	return result;
351 }
352 
353 void dcn30_program_gamut_remap(struct pipe_ctx *pipe_ctx)
354 {
355 	int i = 0;
356 	struct dpp_grph_csc_adjustment dpp_adjust;
357 	struct mpc_grph_gamut_adjustment mpc_adjust;
358 	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
359 	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
360 
361 	memset(&dpp_adjust, 0, sizeof(dpp_adjust));
362 	dpp_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
363 
364 	if (pipe_ctx->plane_state &&
365 	    pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
366 		dpp_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
367 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
368 			dpp_adjust.temperature_matrix[i] =
369 				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
370 	}
371 
372 	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp,
373 							    &dpp_adjust);
374 
375 	memset(&mpc_adjust, 0, sizeof(mpc_adjust));
376 	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
377 
378 	if (pipe_ctx->top_pipe == NULL) {
379 		if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
380 			mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
381 			for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
382 				mpc_adjust.temperature_matrix[i] =
383 					pipe_ctx->stream->gamut_remap_matrix.matrix[i];
384 		}
385 	}
386 
387 	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
388 }
389 
390 bool dcn30_set_output_transfer_func(struct dc *dc,
391 				struct pipe_ctx *pipe_ctx,
392 				const struct dc_stream_state *stream)
393 {
394 	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
395 	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
396 	const struct pwl_params *params = NULL;
397 	bool ret = false;
398 
399 	/* program OGAM or 3DLUT only for the top pipe*/
400 	if (pipe_ctx->top_pipe == NULL) {
401 		/*program rmu shaper and 3dlut in MPC*/
402 		ret = dcn30_set_mpc_shaper_3dlut(pipe_ctx, stream);
403 		if (ret == false && mpc->funcs->set_output_gamma) {
404 			if (stream->out_transfer_func.type == TF_TYPE_HWPWL)
405 				params = &stream->out_transfer_func.pwl;
406 			else if (pipe_ctx->stream->out_transfer_func.type ==
407 					TF_TYPE_DISTRIBUTED_POINTS &&
408 					cm3_helper_translate_curve_to_hw_format(
409 					&stream->out_transfer_func,
410 					&mpc->blender_params, false))
411 				params = &mpc->blender_params;
412 			 /* there are no ROM LUTs in OUTGAM */
413 			if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED)
414 				BREAK_TO_DEBUGGER();
415 		}
416 	}
417 
418 	if (mpc->funcs->set_output_gamma)
419 		mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
420 	else
421 		DC_LOG_ERROR("%s: set_output_gamma function pointer is NULL.\n", __func__);
422 
423 	return ret;
424 }
425 
426 static void dcn30_set_writeback(
427 		struct dc *dc,
428 		struct dc_writeback_info *wb_info,
429 		struct dc_state *context)
430 {
431 	struct mcif_wb *mcif_wb;
432 	struct mcif_buf_params *mcif_buf_params;
433 
434 	ASSERT(wb_info->dwb_pipe_inst < MAX_DWB_PIPES);
435 	ASSERT(wb_info->wb_enabled);
436 	ASSERT(wb_info->mpcc_inst >= 0);
437 	ASSERT(wb_info->mpcc_inst < dc->res_pool->mpcc_count);
438 	mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
439 	mcif_buf_params = &wb_info->mcif_buf_params;
440 
441 	/* set DWB MPC mux */
442 	dc->res_pool->mpc->funcs->set_dwb_mux(dc->res_pool->mpc,
443 			wb_info->dwb_pipe_inst, wb_info->mpcc_inst);
444 	/* set MCIF_WB buffer and arbitration configuration */
445 	mcif_wb->funcs->config_mcif_buf(mcif_wb, mcif_buf_params, wb_info->dwb_params.dest_height);
446 	mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
447 }
448 
449 void dcn30_update_writeback(
450 		struct dc *dc,
451 		struct dc_writeback_info *wb_info,
452 		struct dc_state *context)
453 {
454 	struct dwbc *dwb;
455 	dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
456 	DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\
457 		__func__, wb_info->dwb_pipe_inst,\
458 		wb_info->mpcc_inst);
459 
460 	dcn30_set_writeback(dc, wb_info, context);
461 
462 	/* update DWB */
463 	dwb->funcs->update(dwb, &wb_info->dwb_params);
464 }
465 
466 bool dcn30_mmhubbub_warmup(
467 	struct dc *dc,
468 	unsigned int num_dwb,
469 	struct dc_writeback_info *wb_info)
470 {
471 	struct dwbc *dwb;
472 	struct mcif_wb *mcif_wb;
473 	struct mcif_warmup_params warmup_params = {0};
474 	unsigned int  i, i_buf;
475 	/* make sure there is no active DWB enabled */
476 	for (i = 0; i < num_dwb; i++) {
477 		dwb = dc->res_pool->dwbc[wb_info[i].dwb_pipe_inst];
478 		if (dwb->dwb_is_efc_transition || dwb->dwb_is_drc) {
479 			/* cannot do warmup while any DWB is enabled */
480 			return false;
481 		}
482 	}
483 
484 	if (wb_info->mcif_warmup_params.p_vmid == 0)
485 		return false;
486 
487 	/* check whether this is the new interface: warm up the big buffer once */
488 	if (wb_info->mcif_warmup_params.start_address.quad_part != 0 &&
489 		wb_info->mcif_warmup_params.region_size != 0) {
490 		/*mmhubbub is shared, so it does not matter which MCIF*/
491 		mcif_wb = dc->res_pool->mcif_wb[0];
492 		/*warmup a big chunk of VM buffer at once*/
493 		warmup_params.start_address.quad_part = wb_info->mcif_warmup_params.start_address.quad_part;
494 		warmup_params.address_increment =  wb_info->mcif_warmup_params.region_size;
495 		warmup_params.region_size = wb_info->mcif_warmup_params.region_size;
496 		warmup_params.p_vmid = wb_info->mcif_warmup_params.p_vmid;
497 
498 		if (warmup_params.address_increment == 0)
499 			warmup_params.address_increment = dc->dml.soc.vmm_page_size_bytes;
500 
501 		mcif_wb->funcs->warmup_mcif(mcif_wb, &warmup_params);
502 		return true;
503 	}
504 	/* the following is the original path: warm up each DWB's MCIF buffer */
505 	for (i = 0; i < num_dwb; i++) {
506 		mcif_wb = dc->res_pool->mcif_wb[wb_info[i].dwb_pipe_inst];
507 		/*warmup is for VM mode only*/
508 		if (wb_info[i].mcif_buf_params.p_vmid == 0)
509 			return false;
510 
511 		/* Warmup MCIF_WB */
512 		for (i_buf = 0; i_buf < MCIF_BUF_COUNT; i_buf++) {
513 			warmup_params.start_address.quad_part = wb_info[i].mcif_buf_params.luma_address[i_buf];
514 			warmup_params.address_increment = dc->dml.soc.vmm_page_size_bytes;
515 			warmup_params.region_size = wb_info[i].mcif_buf_params.luma_pitch * wb_info[i].dwb_params.dest_height;
516 			warmup_params.p_vmid = wb_info[i].mcif_buf_params.p_vmid;
517 			mcif_wb->funcs->warmup_mcif(mcif_wb, &warmup_params);
518 		}
519 	}
520 	return true;
521 }
522 
523 void dcn30_enable_writeback(
524 		struct dc *dc,
525 		struct dc_writeback_info *wb_info,
526 		struct dc_state *context)
527 {
528 	struct dwbc *dwb;
529 	struct mcif_wb *mcif_wb;
530 
531 	dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
532 	mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
533 
534 	DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\
535 		__func__, wb_info->dwb_pipe_inst,\
536 		wb_info->mpcc_inst);
537 
538 	/* Warmup interface */
539 	dcn30_mmhubbub_warmup(dc, 1, wb_info);
540 
541 	/* Update writeback pipe */
542 	dcn30_set_writeback(dc, wb_info, context);
543 
544 	/* Enable MCIF_WB */
545 	mcif_wb->funcs->enable_mcif(mcif_wb);
546 	/* Enable DWB */
547 	dwb->funcs->enable(dwb, &wb_info->dwb_params);
548 }
549 
550 void dcn30_disable_writeback(
551 		struct dc *dc,
552 		unsigned int dwb_pipe_inst)
553 {
554 	struct dwbc *dwb;
555 	struct mcif_wb *mcif_wb;
556 
557 	ASSERT(dwb_pipe_inst < MAX_DWB_PIPES);
558 	dwb = dc->res_pool->dwbc[dwb_pipe_inst];
559 	mcif_wb = dc->res_pool->mcif_wb[dwb_pipe_inst];
560 	DC_LOG_DWB("%s dwb_pipe_inst = %d",\
561 		__func__, dwb_pipe_inst);
562 
563 	/* disable DWB */
564 	dwb->funcs->disable(dwb);
565 	/* disable MCIF */
566 	mcif_wb->funcs->disable_mcif(mcif_wb);
567 	/* disable MPC DWB mux */
568 	dc->res_pool->mpc->funcs->disable_dwb_mux(dc->res_pool->mpc, dwb_pipe_inst);
569 }
570 
571 void dcn30_program_all_writeback_pipes_in_tree(
572 		struct dc *dc,
573 		const struct dc_stream_state *stream,
574 		struct dc_state *context)
575 {
576 	struct dc_writeback_info wb_info;
577 	struct dwbc *dwb;
578 	struct dc_stream_status *stream_status = NULL;
579 	int i_wb, i_pipe, i_stream;
580 	DC_LOG_DWB("%s", __func__);
581 
582 	ASSERT(stream);
583 	for (i_stream = 0; i_stream < context->stream_count; i_stream++) {
584 		if (context->streams[i_stream] == stream) {
585 			stream_status = &context->stream_status[i_stream];
586 			break;
587 		}
588 	}
589 	ASSERT(stream_status);
590 
591 	ASSERT(stream->num_wb_info <= dc->res_pool->res_cap->num_dwb);
592 	/* For each writeback pipe */
593 	for (i_wb = 0; i_wb < stream->num_wb_info; i_wb++) {
594 
595 		/* copy writeback info to local non-const so mpcc_inst can be set */
596 		wb_info = stream->writeback_info[i_wb];
597 		if (wb_info.wb_enabled) {
598 
599 			/* get the MPCC instance for writeback_source_plane */
600 			wb_info.mpcc_inst = -1;
601 			for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) {
602 				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe];
603 
604 				if (!pipe_ctx->plane_state)
605 					continue;
606 
607 				if (pipe_ctx->plane_state == wb_info.writeback_source_plane) {
608 					wb_info.mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
609 					break;
610 				}
611 			}
612 
613 			if (wb_info.mpcc_inst == -1) {
614 				/* Disable writeback pipe and disconnect from MPCC
615 				 * if source plane has been removed
616 				 */
617 				dc->hwss.disable_writeback(dc, wb_info.dwb_pipe_inst);
618 				continue;
619 			}
620 
621 			ASSERT(wb_info.dwb_pipe_inst < dc->res_pool->res_cap->num_dwb);
622 			dwb = dc->res_pool->dwbc[wb_info.dwb_pipe_inst];
623 			if (dwb->funcs->is_enabled(dwb)) {
624 				/* writeback pipe already enabled, only need to update */
625 				dc->hwss.update_writeback(dc, &wb_info, context);
626 			} else {
627 				/* Enable writeback pipe and connect to MPCC */
628 				dc->hwss.enable_writeback(dc, &wb_info, context);
629 			}
630 		} else {
631 			/* Disable writeback pipe and disconnect from MPCC */
632 			dc->hwss.disable_writeback(dc, wb_info.dwb_pipe_inst);
633 		}
634 	}
635 }
636 
637 void dcn30_init_hw(struct dc *dc)
638 {
639 	struct abm **abms = dc->res_pool->multiple_abms;
640 	struct dce_hwseq *hws = dc->hwseq;
641 	struct dc_bios *dcb = dc->ctx->dc_bios;
642 	struct resource_pool *res_pool = dc->res_pool;
643 	int i;
644 	int edp_num;
645 	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
646 	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
647 
648 	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks)
649 		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
650 
651 	// Initialize the dccg
652 	if (res_pool->dccg->funcs->dccg_init)
653 		res_pool->dccg->funcs->dccg_init(res_pool->dccg);
654 
655 	if (!dcb->funcs->is_accelerated_mode(dcb)) {
656 		hws->funcs.bios_golden_init(dc);
657 		hws->funcs.disable_vga(dc->hwseq);
658 	}
659 
660 	if (dc->debug.enable_mem_low_power.bits.dmcu) {
661 		// Force ERAM to shutdown if DMCU is not enabled
662 		if (dc->debug.disable_dmcu || dc->config.disable_dmcu) {
663 			REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3);
664 		}
665 	}
666 
667 	// Set default OPTC memory power states
668 	if (dc->debug.enable_mem_low_power.bits.optc) {
669 		// Shutdown when unassigned and light sleep in VBLANK
670 		REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
671 	}
672 
673 	if (dc->debug.enable_mem_low_power.bits.vga) {
674 		// Power down VGA memory
675 		REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
676 	}
677 
678 	if (dc->ctx->dc_bios->fw_info_valid) {
679 		res_pool->ref_clocks.xtalin_clock_inKhz =
680 				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
681 
682 		if (res_pool->hubbub) {
683 
684 			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
685 					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
686 					&res_pool->ref_clocks.dccg_ref_clock_inKhz);
687 
688 			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
689 					res_pool->ref_clocks.dccg_ref_clock_inKhz,
690 					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
691 		} else {
692 			// Not all ASICs have DCCG sw component
693 			res_pool->ref_clocks.dccg_ref_clock_inKhz =
694 					res_pool->ref_clocks.xtalin_clock_inKhz;
695 			res_pool->ref_clocks.dchub_ref_clock_inKhz =
696 					res_pool->ref_clocks.xtalin_clock_inKhz;
697 		}
698 	} else
699 		ASSERT_CRITICAL(false);
700 
701 	for (i = 0; i < dc->link_count; i++) {
702 		/* Power up AND update implementation according to the
703 		 * required signal (which may be different from the
704 		 * default signal on connector).
705 		 */
706 		struct dc_link *link = dc->links[i];
707 
708 		link->link_enc->funcs->hw_init(link->link_enc);
709 
710 		/* Check for enabled DIG to identify enabled display */
711 		if (link->link_enc->funcs->is_dig_enabled &&
712 			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
713 			link->link_status.link_active = true;
714 			if (link->link_enc->funcs->fec_is_active &&
715 					link->link_enc->funcs->fec_is_active(link->link_enc))
716 				link->fec_state = dc_link_fec_enabled;
717 		}
718 	}
719 
720 	/* we want to turn off all dp displays before doing detection */
721 	dc->link_srv->blank_all_dp_displays(dc);
722 
723 	if (hws->funcs.enable_power_gating_plane)
724 		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
725 
726 	/* If taking control over from VBIOS, we may want to optimize our first
727 	 * mode set, so we need to skip powering down pipes until we know which
728 	 * pipes we want to use.
729 	 * Otherwise, if taking control is not possible, we need to power
730 	 * everything down.
731 	 */
732 	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
733 		hws->funcs.init_pipes(dc, dc->current_state);
734 		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
735 			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
736 					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
737 	}
738 
739 	/* In headless boot cases, DIG may be turned
740 	 * on which causes HW/SW discrepancies.
741 	 * To avoid this, power down hardware on boot
742 	 * if DIG is turned on and seamless boot is not enabled.
743 	 */
744 	if (!dc->config.seamless_boot_edp_requested) {
745 		struct dc_link *edp_links[MAX_NUM_EDP];
746 		struct dc_link *edp_link = NULL;
747 
748 		dc_get_edp_links(dc, edp_links, &edp_num);
749 		if (edp_num)
750 			edp_link = edp_links[0];
751 		if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
752 				edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
753 				dc->hwss.edp_backlight_control &&
754 				hws->funcs.power_down &&
755 				dc->hwss.edp_power_control) {
756 			dc->hwss.edp_backlight_control(edp_link, false);
757 			hws->funcs.power_down(dc);
758 			dc->hwss.edp_power_control(edp_link, false);
759 		} else {
760 			for (i = 0; i < dc->link_count; i++) {
761 				struct dc_link *link = dc->links[i];
762 
763 				if (link->link_enc->funcs->is_dig_enabled &&
764 						link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
765 						hws->funcs.power_down) {
766 					hws->funcs.power_down(dc);
767 					break;
768 				}
769 
770 			}
771 		}
772 	}
773 
774 	for (i = 0; i < res_pool->audio_count; i++) {
775 		struct audio *audio = res_pool->audios[i];
776 
777 		audio->funcs->hw_init(audio);
778 	}
779 
780 	for (i = 0; i < dc->link_count; i++) {
781 		struct dc_link *link = dc->links[i];
782 
783 		if (link->panel_cntl) {
784 			backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
785 			user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
786 		}
787 	}
788 
789 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
790 		if (abms[i] != NULL)
791 			abms[i]->funcs->abm_init(abms[i], backlight, user_level);
792 	}
793 
794 	/* power AFMT HDMI memory. TODO: may move to output enable/disable to save power */
795 	REG_WRITE(DIO_MEM_PWR_CTRL, 0);
796 
797 	if (!dc->debug.disable_clock_gate) {
798 		/* enable all DCN clock gating */
799 		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
800 
801 		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
802 
803 		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
804 	}
805 
806 	if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
807 		dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
808 
809 	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges)
810 		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
811 
812 	//if softmax is enabled then hardmax will be set by a different call
813 	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->set_hard_max_memclk &&
814 	    !dc->clk_mgr->dc_mode_softmax_enabled)
815 		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
816 
817 	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
818 		dc->res_pool->hubbub->funcs->force_pstate_change_control(
819 				dc->res_pool->hubbub, false, false);
820 	if (dc->res_pool->hubbub->funcs->init_crb)
821 		dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
822 
823 	// Get DMCUB capabilities
824 	dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
825 	dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
826 	dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
827 }
828 
829 void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
830 {
831 	if (pipe_ctx == NULL)
832 		return;
833 
834 	if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) {
835 		pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
836 				pipe_ctx->stream_res.stream_enc,
837 				enable);
838 
839 		/* Wait for two frames to make sure AV mute is sent out */
840 		if (enable && pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) {
841 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
842 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
843 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
844 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
845 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
846 		}
847 	}
848 }
849 
850 void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
851 {
852 	bool is_hdmi_tmds;
853 	bool is_dp;
854 
855 	ASSERT(pipe_ctx->stream);
856 
857 	if (pipe_ctx->stream_res.stream_enc == NULL)
858 		return;  /* this is not root pipe */
859 
860 	is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal);
861 	is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
862 
863 	if (!is_hdmi_tmds && !is_dp)
864 		return;
865 
866 	if (is_hdmi_tmds)
867 		pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
868 			pipe_ctx->stream_res.stream_enc,
869 			&pipe_ctx->stream_res.encoder_info_frame);
870 	else {
871 		if (pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num)
872 			pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num(
873 				pipe_ctx->stream_res.stream_enc,
874 				&pipe_ctx->stream_res.encoder_info_frame);
875 
876 		pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
877 			pipe_ctx->stream_res.stream_enc,
878 			&pipe_ctx->stream_res.encoder_info_frame);
879 	}
880 }
881 
882 void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
883 {
884 	struct dc_stream_state    *stream     = pipe_ctx->stream;
885 	struct hubp               *hubp       = pipe_ctx->plane_res.hubp;
886 	bool                       enable     = false;
887 	struct stream_encoder     *stream_enc = pipe_ctx->stream_res.stream_enc;
888 	enum dynamic_metadata_mode mode       = dc_is_dp_signal(stream->signal)
889 							? dmdata_dp
890 							: dmdata_hdmi;
891 
892 	/* if using dynamic meta, don't set up generic infopackets */
893 	if (pipe_ctx->stream->dmdata_address.quad_part != 0) {
894 		pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false;
895 		enable = true;
896 	}
897 
898 	if (!hubp)
899 		return;
900 
901 	if (!stream_enc || !stream_enc->funcs->set_dynamic_metadata)
902 		return;
903 
904 	stream_enc->funcs->set_dynamic_metadata(stream_enc, enable,
905 							hubp->inst, mode);
906 }
907 
908 bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
909 {
910 	union dmub_rb_cmd cmd;
911 	uint32_t tmr_delay = 0, tmr_scale = 0;
912 	struct dc_cursor_attributes cursor_attr = {0};
913 	bool cursor_cache_enable = false;
914 	struct dc_stream_state *stream = NULL;
915 	struct dc_plane_state *plane = NULL;
916 
917 	if (!dc->ctx->dmub_srv)
918 		return false;
919 
920 	if (enable) {
921 		if (dc->current_state) {
922 			int i;
923 
924 			/* First, check no-memory-requests case */
925 			for (i = 0; i < dc->current_state->stream_count; i++) {
926 				if (dc->current_state->stream_status[i].plane_count)
927 					/* Fail eligibility on a visible stream */
928 					break;
929 			}
930 
931 			if (i == dc->current_state->stream_count) {
932 				/* Enable no-memory-requests case */
933 				memset(&cmd, 0, sizeof(cmd));
934 				cmd.mall.header.type = DMUB_CMD__MALL;
935 				cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_NO_DF_REQ;
936 				cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header);
937 
938 				dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
939 
940 				return true;
941 			}
942 
943 			stream = dc->current_state->streams[0];
944 			plane = (stream ? dc->current_state->stream_status[0].plane_states[0] : NULL);
945 
946 			if (!stream || !plane)
947 				return false;
948 
949 			if (stream && plane) {
950 				cursor_cache_enable = stream->cursor_position.enable &&
951 						plane->address.grph.cursor_cache_addr.quad_part;
952 				cursor_attr = stream->cursor_attributes;
953 			}
954 
955 			/*
956 			 * Second, check MALL eligibility
957 			 *
958 			 * single display only, single surface only, 8 and 16 bit formats only, no VM,
959 			 * do not use MALL for displays that support PSR as they use D0i3.2 in DMCUB FW
960 			 *
961 			 * TODO: When we implement multi-display, PSR displays will be allowed if there is
962 			 * a non-PSR display present, since in that case we can't do D0i3.2
963 			 */
964 			if (dc->current_state->stream_count == 1 &&
965 					stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
966 					dc->current_state->stream_status[0].plane_count == 1 &&
967 					plane->format <= SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F &&
968 					plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888 &&
969 					plane->address.page_table_base.quad_part == 0 &&
970 					dc->hwss.does_plane_fit_in_mall &&
971 					dc->hwss.does_plane_fit_in_mall(dc, plane->plane_size.surface_pitch,
972 							plane->plane_size.surface_size.height, plane->format,
973 							cursor_cache_enable ? &cursor_attr : NULL)) {
974 				unsigned int v_total = stream->adjust.v_total_max ?
975 						stream->adjust.v_total_max : stream->timing.v_total;
976 				unsigned int refresh_hz = div_u64((unsigned long long) stream->timing.pix_clk_100hz *
977 						100LL, (v_total * stream->timing.h_total));
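				/* For example (illustrative numbers, not taken from this
				 * driver): a 594 MHz pixel clock (pix_clk_100hz = 5940000)
				 * with h_total = 4400 and v_total = 2250 gives
				 * 594000000 / (4400 * 2250) = 60 Hz.
				 */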
978 
979 				/*
980 				 * one frame time in microsec:
981 				 * Delay_Us = 1000000 / refresh
982 				 * dynamic_delay_us = 1000000 / refresh + 2 * stutter_period
983 				 *
984 				 * one frame time modified by 'additional timer percent' (p):
985 				 * Delay_Us_modified = dynamic_delay_us + dynamic_delay_us * p / 100
986 				 *                   = dynamic_delay_us * (1 + p / 100)
987 				 *                   = (1000000 / refresh + 2 * stutter_period) * (100 + p) / 100
988 				 *                   = (1000000 + 2 * stutter_period * refresh) * (100 + p) / (100 * refresh)
989 				 *
990 				 * formula for timer duration based on parameters, from regspec:
991 				 * dynamic_delay_us = 65.28 * (64 + MallFrameCacheTmrDly) * 2^MallFrameCacheTmrScale
992 				 *
993 				 * dynamic_delay_us / 65.28 = (64 + MallFrameCacheTmrDly) * 2^MallFrameCacheTmrScale
994 				 * (dynamic_delay_us / 65.28) / 2^MallFrameCacheTmrScale = 64 + MallFrameCacheTmrDly
995 				 * MallFrameCacheTmrDly = ((dynamic_delay_us / 65.28) / 2^MallFrameCacheTmrScale) - 64
996 				 *                      = (1000000 + 2 * stutter_period * refresh) * (100 + p) / (100 * refresh) / 65.28 / 2^MallFrameCacheTmrScale - 64
997 				 *                      = (1000000 + 2 * stutter_period * refresh) * (100 + p) / (refresh * 6528 * 2^MallFrameCacheTmrScale) - 64
998 				 *
999 				 * need to round up the result of the division before the subtraction
1000 				 */
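				/* Worked example (illustrative values only, not taken from
				 * hardware): with refresh = 60 Hz, stutter_period = 100 us and
				 * mall_additional_timer_percent = 0:
				 *   (1000000 + 2 * 100 * 60) * 100 / (60 * 6528 * 2^0) = ~258.4
				 * so tmr_delay would start at 259 - 64 = 195, which does not
				 * fit in 6 bits; after two doublings of the scale
				 * (tmr_scale = 2) it becomes 65 - 64 = 1, i.e. a programmed
				 * delay of 65.28 * (64 + 1) * 2^2 = ~16973 us, just over one
				 * frame plus two stutter periods (~16867 us), as intended.
				 */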
1001 				unsigned int denom = refresh_hz * 6528;
1002 				unsigned int stutter_period = dc->current_state->perf_params.stutter_period_us;
1003 
1004 				tmr_delay = div_u64(((1000000LL + 2 * stutter_period * refresh_hz) *
1005 						(100LL + dc->debug.mall_additional_timer_percent) + denom - 1),
1006 						denom) - 64LL;
1007 
1008 				/* In some cases the stutter period is really big (tiny modes); in these
1009 				 * cases MALL can't be enabled, so skip them to avoid an ASSERT()
1010 				 *
1011 				 * We can check if stutter_period is more than 1/10th the frame time to
1012 				 * consider if we can actually meet the range of hysteresis timer
1013 				 */
1014 				if (stutter_period > 100000/refresh_hz)
1015 					return false;
1016 
1017 				/* scale should be increased until it fits into 6 bits */
1018 				while (tmr_delay & ~0x3F) {
1019 					tmr_scale++;
1020 
1021 					if (tmr_scale > 3) {
1022 						/* Delay exceeds range of hysteresis timer */
1023 						ASSERT(false);
1024 						return false;
1025 					}
1026 
1027 					denom *= 2;
1028 					tmr_delay = div_u64(((1000000LL + 2 * stutter_period * refresh_hz) *
1029 							(100LL + dc->debug.mall_additional_timer_percent) + denom - 1),
1030 							denom) - 64LL;
1031 				}
1032 
1033 				/* Copy HW cursor */
1034 				if (cursor_cache_enable) {
1035 					memset(&cmd, 0, sizeof(cmd));
1036 					cmd.mall.header.type = DMUB_CMD__MALL;
1037 					cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_COPY_CURSOR;
1038 					cmd.mall.header.payload_bytes =
1039 							sizeof(cmd.mall) - sizeof(cmd.mall.header);
1040 
1041 					switch (cursor_attr.color_format) {
1042 					case CURSOR_MODE_MONO:
1043 						cmd.mall.cursor_bpp = 2;
1044 						break;
1045 					case CURSOR_MODE_COLOR_1BIT_AND:
1046 					case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
1047 					case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
1048 						cmd.mall.cursor_bpp = 32;
1049 						break;
1050 
1051 					case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
1052 					case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
1053 						cmd.mall.cursor_bpp = 64;
1054 						break;
1055 					}
1056 
1057 					cmd.mall.cursor_copy_src.quad_part = cursor_attr.address.quad_part;
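					/* round the cursor cache address up to the next 2 KB boundary */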
1058 					cmd.mall.cursor_copy_dst.quad_part =
1059 							(plane->address.grph.cursor_cache_addr.quad_part + 2047) & ~2047;
1060 					cmd.mall.cursor_width = cursor_attr.width;
1061 					cmd.mall.cursor_height = cursor_attr.height;
1062 					cmd.mall.cursor_pitch = cursor_attr.pitch;
1063 
1064 					dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
1065 
1066 					/* Use copied cursor, and it's okay to not switch back */
1067 					cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part;
1068 					dc_stream_program_cursor_attributes(stream, &cursor_attr);
1069 				}
1070 
1071 				/* Enable MALL */
1072 				memset(&cmd, 0, sizeof(cmd));
1073 				cmd.mall.header.type = DMUB_CMD__MALL;
1074 				cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_ALLOW;
1075 				cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header);
1076 				cmd.mall.tmr_delay = tmr_delay;
1077 				cmd.mall.tmr_scale = tmr_scale;
1078 				cmd.mall.debug_bits = dc->debug.mall_error_as_fatal;
1079 
1080 				dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
1081 
1082 				return true;
1083 			}
1084 		}
1085 
1086 		/* No applicable optimizations */
1087 		return false;
1088 	}
1089 
1090 	/* Disable MALL */
1091 	memset(&cmd, 0, sizeof(cmd));
1092 	cmd.mall.header.type = DMUB_CMD__MALL;
1093 	cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_DISALLOW;
1094 	cmd.mall.header.payload_bytes =
1095 		sizeof(cmd.mall) - sizeof(cmd.mall.header);
1096 
1097 	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
1098 
1099 	return true;
1100 }
1101 
1102 bool dcn30_does_plane_fit_in_mall(struct dc *dc,
1103 		unsigned int pitch,
1104 		unsigned int height,
1105 		enum surface_pixel_format format,
1106 		struct dc_cursor_attributes *cursor_attr)
1107 {
1108 	// add meta size?
1109 	unsigned int surface_size = pitch * height *
1110 			(format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
1111 	unsigned int mall_size = dc->caps.mall_size_total;
1112 	unsigned int cursor_size = 0;
1113 
1114 	if (dc->debug.mall_size_override)
1115 		mall_size = 1024 * 1024 * dc->debug.mall_size_override;
1116 
1117 	if (cursor_attr) {
1118 		cursor_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size;
1119 
1120 		switch (cursor_attr->color_format) {
1121 		case CURSOR_MODE_MONO:
1122 			cursor_size /= 2;
1123 			break;
1124 		case CURSOR_MODE_COLOR_1BIT_AND:
1125 		case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
1126 		case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
1127 			cursor_size *= 4;
1128 			break;
1129 
1130 		case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
1131 		case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
1132 			cursor_size *= 8;
1133 			break;
1134 		}
1135 	}
1136 
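	/* Rough illustrative numbers (assumptions, not from this driver): a
	 * 3840x2160 ARGB8888 surface with a 3840-pixel pitch needs
	 * 3840 * 2160 * 4 = ~33.2 MB; that total plus the cursor allowance
	 * above must stay below the (possibly debug-overridden) MALL size.
	 */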
1137 	return (surface_size + cursor_size) < mall_size;
1138 }
1139 
1140 void dcn30_hardware_release(struct dc *dc)
1141 {
1142 	bool subvp_in_use = false;
1143 	uint32_t i;
1144 
1145 	dc_dmub_srv_p_state_delegate(dc, false, NULL);
1146 	dc_dmub_setup_subvp_dmub_command(dc, dc->current_state, false);
1147 
1148 	/* SubVP is treated the same way as FPO: on driver disable with a SubVP
1149 	 * config in use, disable firmware handling and force P-State support
1150 	 * on the DCN side to prevent a P-State hang on driver enable.
1151 	 */
1152 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1153 		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1154 
1155 		if (!pipe->stream)
1156 			continue;
1157 
1158 		if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_MAIN) {
1159 			subvp_in_use = true;
1160 			break;
1161 		}
1162 	}
1163 	/* If the P-State change is unsupported, or is still supported
1164 	 * by firmware, force it to be supported by DCN.
1165 	 */
1166 	if (dc->current_state)
1167 		if ((!dc->clk_mgr->clks.p_state_change_support || subvp_in_use ||
1168 				dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) &&
1169 				dc->res_pool->hubbub->funcs->force_pstate_change_control)
1170 			dc->res_pool->hubbub->funcs->force_pstate_change_control(
1171 					dc->res_pool->hubbub, true, true);
1172 }
1173 
1174 void dcn30_set_disp_pattern_generator(const struct dc *dc,
1175 		struct pipe_ctx *pipe_ctx,
1176 		enum controller_dp_test_pattern test_pattern,
1177 		enum controller_dp_color_space color_space,
1178 		enum dc_color_depth color_depth,
1179 		const struct tg_color *solid_color,
1180 		int width, int height, int offset)
1181 {
1182 	pipe_ctx->stream_res.opp->funcs->opp_set_disp_pattern_generator(pipe_ctx->stream_res.opp, test_pattern,
1183 			color_space, color_depth, solid_color, width, height, offset);
1184 }
1185 
1186 void dcn30_prepare_bandwidth(struct dc *dc,
1187 	struct dc_state *context)
1188 {
1189 	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && !dc->clk_mgr->clks.fw_based_mclk_switching) {
1190 		dc->optimized_required = true;
1191 		context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
1192 	}
1193 
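	/* if memclk is currently held at or below the DC-mode soft max but the
	 * new context needs more, raise the hard cap back to the top of the
	 * clock table before applying the new bandwidth settings
	 */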
1194 	if (dc->clk_mgr->dc_mode_softmax_enabled)
1195 		if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
1196 				context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
1197 			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
1198 
1199 	dcn20_prepare_bandwidth(dc, context);
1200 
1201 	if (!dc->clk_mgr->clks.fw_based_mclk_switching)
1202 		dc_dmub_srv_p_state_delegate(dc, false, context);
1203 }
1204 
1205 void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx)
1206 {
1207 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
1208 	bool pending_updates = false;
1209 	unsigned int i;
1210 
1211 	if (tg && tg->funcs->is_tg_enabled(tg)) {
1212 		// Poll for 100ms maximum
1213 		for (i = 0; i < 100000; i++) {
1214 			pending_updates = false;
1215 			if (tg->funcs->get_optc_double_buffer_pending)
1216 				pending_updates |= tg->funcs->get_optc_double_buffer_pending(tg);
1217 
1218 			if (tg->funcs->get_otg_double_buffer_pending)
1219 				pending_updates |= tg->funcs->get_otg_double_buffer_pending(tg);
1220 
1221 			if (tg->funcs->get_pipe_update_pending && pipe_ctx->plane_state)
1222 				pending_updates |= tg->funcs->get_pipe_update_pending(tg);
1223 
1224 			if (!pending_updates)
1225 				break;
1226 
1227 			udelay(1);
1228 		}
1229 	}
1230 }
1231