xref: /linux/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c (revision 42b16d3ac371a2fac9b6f08fd75f23f34ba3955a)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hwseq.h"
33 #include "dcn10/dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10/dcn10_optc.h"
38 #include "dcn10/dcn10_dpp.h"
39 #include "dcn10/dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10/dcn10_hubp.h"
46 #include "dcn10/dcn10_hubbub.h"
47 #include "dcn10/dcn10_cm_common.h"
48 #include "dccg.h"
49 #include "clk_mgr.h"
50 #include "link_hwss.h"
51 #include "dpcd_defs.h"
52 #include "dsc.h"
53 #include "dce/dmub_psr.h"
54 #include "dc_dmub_srv.h"
55 #include "dce/dmub_hw_lock_mgr.h"
56 #include "dc_trace.h"
57 #include "dce/dmub_outbox.h"
58 #include "link.h"
59 #include "dc_state_priv.h"
60 
/* Route DC_LOG_* output through the logger captured by DC_LOGGER_INIT(). */
#define DC_LOGGER \
	dc_logger
#define DC_LOGGER_INIT(logger) \
	struct dal_logger *dc_logger = logger

/*
 * Register-access helpers: CTX/REG/FN resolve through a local variable
 * named 'hws' (struct dce_hwseq *), which must be in scope at every use.
 */
#define CTX \
	hws->ctx
#define REG(reg)\
	hws->regs->reg

#undef FN
#define FN(reg_name, field_name) \
	hws->shifts->field_name, hws->masks->field_name

/*print is 17 wide, first two characters are spaces*/
/* Expands against locals named 'dc_ctx' and 'log_ctx' in the caller. */
#define DTN_INFO_MICRO_SEC(ref_cycle) \
	print_microsec(dc_ctx, log_ctx, ref_cycle)

#define GAMMA_HW_POINTS_NUM 256

/* Values observed in DOMAINx_PGFSM_PWR_STATUS after (un)gating completes. */
#define PGFSM_POWER_ON 0
#define PGFSM_POWER_OFF 2
83 
/*
 * print_microsec - Log a DCHUB refclk cycle count as microseconds.
 *
 * Converts @ref_cycle (cycles of the DCHUB reference clock) to a fixed-point
 * microsecond value and appends it in the 17-character-wide DTN column format.
 * Despite the name, 'us_x10' holds microseconds scaled by 1000 (frac), giving
 * three fractional digits in the printout.
 *
 * NOTE(review): assumes dchub_ref_clock_inKhz >= 1000 so ref_clk_mhz is
 * non-zero — a smaller value would divide by zero; confirm against init code.
 */
static void print_microsec(struct dc_context *dc_ctx,
			   struct dc_log_buffer_ctx *log_ctx,
			   uint32_t ref_cycle)
{
	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
	static const unsigned int frac = 1000;
	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;

	DTN_INFO("  %11d.%03d",
			us_x10 / frac,
			us_x10 % frac);
}
96 
/*
 * dcn10_lock_all_pipes - Lock or unlock every active top pipe's OTG.
 * @dc: display core instance
 * @context: state whose pipe_ctx array is walked
 * @lock: true to lock, false to unlock
 *
 * Only the top pipe of each tree is (un)locked to avoid redundant
 * operations; disabled pipes, pipes with no stream/plane in either the
 * new or current state, and SubVP phantom pipes are skipped.
 */
void dcn10_lock_all_pipes(struct dc *dc,
	struct dc_state *context,
	bool lock)
{
	struct pipe_ctx *pipe_ctx;
	struct pipe_ctx *old_pipe_ctx;
	struct timing_generator *tg;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
		pipe_ctx = &context->res_ctx.pipe_ctx[i];
		tg = pipe_ctx->stream_res.tg;

		/*
		 * Only lock the top pipe's tg to prevent redundant
		 * (un)locking. Also skip if pipe is disabled.
		 */
		if (pipe_ctx->top_pipe ||
		    !pipe_ctx->stream ||
		    (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
		    !tg->funcs->is_tg_enabled(tg) ||
		    dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM)
			continue;

		/* Both branches of the old if/else called pipe_control_lock()
		 * with a constant equal to 'lock'; pass it through directly. */
		dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
	}
}
128 
/*
 * log_mpc_crc - Append MPC and DPP CRC result registers to the DTN log.
 *
 * Each register is read only if its offset is non-zero in the register map
 * (i.e. the register exists on this ASIC). The REG()/REG_READ() macros
 * resolve through the local 'hws' variable.
 */
static void log_mpc_crc(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dce_hwseq *hws = dc->hwseq;

	if (REG(MPC_CRC_RESULT_GB))
		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
		REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}
142 
dcn10_log_hubbub_state(struct dc * dc,struct dc_log_buffer_ctx * log_ctx)143 static void dcn10_log_hubbub_state(struct dc *dc,
144 				   struct dc_log_buffer_ctx *log_ctx)
145 {
146 	struct dc_context *dc_ctx = dc->ctx;
147 	struct dcn_hubbub_wm wm;
148 	int i;
149 
150 	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
151 	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
152 
153 	DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
154 			"         sr_enter          sr_exit  dram_clk_change\n");
155 
156 	for (i = 0; i < 4; i++) {
157 		struct dcn_hubbub_wm_set *s;
158 
159 		s = &wm.sets[i];
160 		DTN_INFO("WM_Set[%d]:", s->wm_set);
161 		DTN_INFO_MICRO_SEC(s->data_urgent);
162 		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
163 		DTN_INFO_MICRO_SEC(s->sr_enter);
164 		DTN_INFO_MICRO_SEC(s->sr_exit);
165 		DTN_INFO_MICRO_SEC(s->dram_clk_change);
166 		DTN_INFO("\n");
167 	}
168 
169 	DTN_INFO("\n");
170 }
171 
/*
 * dcn10_log_hubp_states - Dump per-HUBP surface, RQ, DLG and TTU state.
 *
 * For every pipe, latches the live HUBP registers into the cached
 * dcn10_hubp state and prints four fixed-column tables (surface config,
 * request regs, delay-line-generator regs, time-to-underflow regs).
 * Blanked HUBPs are skipped in every table.
 */
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO(
		"HUBP:  format  addr_hi  width  height  rot  mir  sw_mode  dcc_en  blank_en  clock_en  ttu_dis  underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct hubp *hubp = pool->hubps[i];
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);

		/* Refresh the cached register snapshot before printing. */
		hubp->funcs->hubp_read_state(hubp);

		if (!s->blank_en) {
			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",
					hubp->inst,
					s->pixel_format,
					s->inuse_addr_hi,
					s->viewport_width,
					s->viewport_height,
					s->rotation_angle,
					s->h_mirror_en,
					s->sw_mode,
					s->dcc_en,
					s->blank_en,
					s->clock_en,
					s->ttu_disable,
					s->underflow_status);
			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
			DTN_INFO("\n");
		}
	}

	/* Request (RQ) registers: expansion modes and chunk/group sizing,
	 * luma (L:) then chroma (C:) columns. */
	DTN_INFO("\n=========RQ========\n");
	DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;

		if (!s->blank_en)
			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
	}

	/* Delay-line-generator (DLG) registers: prefetch/flip/vblank pacing. */
	DTN_INFO("========DLG========\n");
	DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
			"  dst_y_a_s  dst_y_pf   dst_y_vvb  dst_y_rvb  dst_y_vfl  dst_y_rfl  rf_pix_fq"
			"  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
			"  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
			"  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l  "
			"  rc_ld_c    cha_cur0   ofst_cur1  cha_cur1   vr_af_vc0  ddrq_limt  x_rt_dlay"
			"  x_rp_dlay  x_rr_sfl  rc_td_grp\n");

	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh %xh\n",
				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
				dlg_regs->xfc_reg_remote_surface_flip_latency, dlg_regs->refcyc_per_tdlut_group);
	}

	/* Time-to-underflow (TTU) registers: QoS watermarks and delivery rates. */
	DTN_INFO("========TTU========\n");
	DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
			"  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
			"  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
	}
	DTN_INFO("\n");
}
286 
/*
 * dcn10_log_color_state - Dump DPP/MPC color pipeline state and caps.
 *
 * Prints, per enabled DPP: input format, IGAM/DGAM/RGAM LUT modes and (when
 * the DPP exposes a gamut-remap hook) the 3x4 temperature matrix; then the
 * DPP color caps, the MPCC tree state, and the MPC color caps.
 */
static void dcn10_log_color_state(struct dc *dc,
				  struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	bool is_gamut_remap_available = false;
	int i;

	DTN_INFO("DPP:    IGAM format    IGAM mode    DGAM mode    RGAM mode"
		 "  GAMUT adjust  "
		 "C11        C12        C13        C14        "
		 "C21        C22        C23        C24        "
		 "C31        C32        C33        C34        \n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dpp *dpp = pool->dpps[i];
		struct dcn_dpp_state s = {0};

		dpp->funcs->dpp_read_state(dpp, &s);
		/* Flag is sticky: once any DPP provides the hook, the gamut
		 * columns are printed for every subsequent enabled DPP. */
		if (dpp->funcs->dpp_get_gamut_remap) {
			dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
			is_gamut_remap_available = true;
		}

		if (!s.is_enabled)
			continue;

		/* Decode LUT mode fields into the human-readable names used
		 * by the DTN golden logs. */
		DTN_INFO("[%2d]:  %11xh  %11s    %9s    %9s",
				dpp->inst,
				s.igam_input_format,
				(s.igam_lut_mode == 0) ? "BypassFixed" :
					((s.igam_lut_mode == 1) ? "BypassFloat" :
					((s.igam_lut_mode == 2) ? "RAM" :
					((s.igam_lut_mode == 3) ? "RAM" :
								 "Unknown"))),
				(s.dgam_lut_mode == 0) ? "Bypass" :
					((s.dgam_lut_mode == 1) ? "sRGB" :
					((s.dgam_lut_mode == 2) ? "Ycc" :
					((s.dgam_lut_mode == 3) ? "RAM" :
					((s.dgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))),
				(s.rgam_lut_mode == 0) ? "Bypass" :
					((s.rgam_lut_mode == 1) ? "sRGB" :
					((s.rgam_lut_mode == 2) ? "Ycc" :
					((s.rgam_lut_mode == 3) ? "RAM" :
					((s.rgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))));
		if (is_gamut_remap_available)
			DTN_INFO("  %12s  "
				 "%010lld %010lld %010lld %010lld "
				 "%010lld %010lld %010lld %010lld "
				 "%010lld %010lld %010lld %010lld",
				 (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
					((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : "SW"),
				 s.gamut_remap.temperature_matrix[0].value,
				 s.gamut_remap.temperature_matrix[1].value,
				 s.gamut_remap.temperature_matrix[2].value,
				 s.gamut_remap.temperature_matrix[3].value,
				 s.gamut_remap.temperature_matrix[4].value,
				 s.gamut_remap.temperature_matrix[5].value,
				 s.gamut_remap.temperature_matrix[6].value,
				 s.gamut_remap.temperature_matrix[7].value,
				 s.gamut_remap.temperature_matrix[8].value,
				 s.gamut_remap.temperature_matrix[9].value,
				 s.gamut_remap.temperature_matrix[10].value,
				 s.gamut_remap.temperature_matrix[11].value);

		DTN_INFO("\n");
	}
	DTN_INFO("\n");
	DTN_INFO("DPP Color Caps: input_lut_shared:%d  icsc:%d"
		 "  dgam_ram:%d  dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d"
		 "  post_csc:%d  gamcor:%d  dgam_rom_for_yuv:%d  3d_lut:%d"
		 "  blnd_lut:%d  oscs:%d\n\n",
		 dc->caps.color.dpp.input_lut_shared,
		 dc->caps.color.dpp.icsc,
		 dc->caps.color.dpp.dgam_ram,
		 dc->caps.color.dpp.dgam_rom_caps.srgb,
		 dc->caps.color.dpp.dgam_rom_caps.bt2020,
		 dc->caps.color.dpp.dgam_rom_caps.gamma2_2,
		 dc->caps.color.dpp.dgam_rom_caps.pq,
		 dc->caps.color.dpp.dgam_rom_caps.hlg,
		 dc->caps.color.dpp.post_csc,
		 dc->caps.color.dpp.gamma_corr,
		 dc->caps.color.dpp.dgam_rom_for_yuv,
		 dc->caps.color.dpp.hw_3d_lut,
		 dc->caps.color.dpp.ogam_ram,
		 dc->caps.color.dpp.ocsc);

	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
	for (i = 0; i < pool->mpcc_count; i++) {
		struct mpcc_state s = {0};

		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
		/* opp_id == 0xf means the MPCC is unassigned; skip it. */
		if (s.opp_id != 0xf)
			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
				s.idle);
	}
	DTN_INFO("\n");
	DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n",
		 dc->caps.color.mpc.gamut_remap,
		 dc->caps.color.mpc.num_3dluts,
		 dc->caps.color.mpc.ogam_ram,
		 dc->caps.color.mpc.ocsc);
}
393 
/*
 * dcn10_log_hw_state - Dump a snapshot of DCN hardware state to the DTN log.
 * @dc: display core instance to dump
 * @log_ctx: log buffer the DTN_INFO output is appended to
 *
 * Emits HUBBUB, HUBP, color, OTG, DSC, stream/link encoder, clock and
 * (when present) DP HPO encoder state in the fixed-column "golden log"
 * format. Mostly read-only, but clears each enabled OTG's sticky
 * underflow bit as a side effect (see comment below).
 */
void dcn10_log_hw_state(struct dc *dc,
			struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO_BEGIN();

	dcn10_log_hubbub_state(dc, log_ctx);

	dcn10_log_hubp_states(dc, log_ctx);

	/* Prefer an ASIC-specific color dumper when the hw sequencer
	 * provides one; otherwise fall back to the DCN10 version. */
	if (dc->hwss.log_color_state)
		dc->hwss.log_color_state(dc, log_ctx);
	else
		dcn10_log_color_state(dc, log_ctx);

	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");

	for (i = 0; i < pool->timing_generator_count; i++) {
		struct timing_generator *tg = pool->timing_generators[i];
		struct dcn_otg_state s = {0};
		/* Read shared OTG state registers for all DCNx */
		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);

		/*
		 * For DCN2 and greater, a register on the OPP is used to
		 * determine if the CRTC is blanked instead of the OTG. So use
		 * dpg_is_blanked() if exists, otherwise fallback on otg.
		 *
		 * TODO: Implement DCN-specific read_otg_state hooks.
		 */
		if (pool->opps[i]->funcs->dpg_is_blanked)
			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
		else
			s.blank_enabled = tg->funcs->is_blanked(tg);

		//only print if OTG master is enabled
		if ((s.otg_enabled & 1) == 0)
			continue;

		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d  %9d %8d\n",
				tg->inst,
				s.v_blank_start,
				s.v_blank_end,
				s.v_sync_a_start,
				s.v_sync_a_end,
				s.v_sync_a_pol,
				s.v_total_max,
				s.v_total_min,
				s.v_total_max_sel,
				s.v_total_min_sel,
				s.h_blank_start,
				s.h_blank_end,
				s.h_sync_a_start,
				s.h_sync_a_end,
				s.h_sync_a_pol,
				s.h_total,
				s.v_total,
				s.underflow_occurred_status,
				s.blank_enabled);

		// Clear underflow for debug purposes
		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
		// This function is called only from Windows or Diags test environment, hence it's safe to clear
		// it from here without affecting the original intent.
		tg->funcs->clear_optc_underflow(tg);
	}
	DTN_INFO("\n");

	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
	// TODO: Update golden log header to reflect this name change
	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
	for (i = 0; i < pool->res_cap->num_dsc; i++) {
		struct display_stream_compressor *dsc = pool->dscs[i];
		struct dcn_dsc_state s = {0};

		dsc->funcs->dsc_read_state(dsc, &s);
		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
		dsc->inst,
			s.dsc_clock_en,
			s.dsc_slice_width,
			s.dsc_bits_per_pixel);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	/* Stream encoders: only dumped when the encoder implements
	 * enc_read_state (not all ASIC variants do). */
	DTN_INFO("S_ENC: DSC_MODE  SEC_GSP7_LINE_NUM"
			"  VBID6_LINE_REFERENCE  VBID6_LINE_NUM  SEC_GSP7_ENABLE  SEC_STREAM_ENABLE\n");
	for (i = 0; i < pool->stream_enc_count; i++) {
		struct stream_encoder *enc = pool->stream_enc[i];
		struct enc_state s = {0};

		if (enc->funcs->enc_read_state) {
			enc->funcs->enc_read_state(enc, &s);
			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
				enc->id,
				s.dsc_mode,
				s.sec_gsp_pps_line_num,
				s.vbid6_line_reference,
				s.vbid6_line_num,
				s.sec_gsp_pps_enable,
				s.sec_stream_enable);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	/* Link encoders: a link may have no encoder assigned (NULL check). */
	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");
	for (i = 0; i < dc->link_count; i++) {
		struct link_encoder *lenc = dc->links[i]->link_enc;

		struct link_enc_state s = {0};

		if (lenc && lenc->funcs->read_state) {
			lenc->funcs->read_state(lenc, &s);
			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
				i,
				s.dphy_fec_en,
				s.dphy_fec_ready_shadow,
				s.dphy_fec_active_status,
				s.dp_link_training_complete);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);

	log_mpc_crc(dc, log_ctx);

	/* DP HPO (128b/132b) encoder sections — only on pools that have them. */
	{
		if (pool->hpo_dp_stream_enc_count > 0) {
			DTN_INFO("DP HPO S_ENC:  Enabled  OTG   Format   Depth   Vid   SDP   Compressed  Link\n");
			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
				struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
				struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];

				if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
					hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);

					DTN_INFO("[%d]:                 %d    %d   %6s       %d     %d     %d            %d     %d\n",
							hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
							hpo_dp_se_state.stream_enc_enabled,
							hpo_dp_se_state.otg_inst,
							(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
									((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
									(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
							(hpo_dp_se_state.component_depth == 0) ? 6 :
									((hpo_dp_se_state.component_depth == 1) ? 8 :
									(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
							hpo_dp_se_state.vid_stream_enabled,
							hpo_dp_se_state.sdp_enabled,
							hpo_dp_se_state.compressed_format,
							hpo_dp_se_state.mapped_to_link_enc);
				}
			}

			DTN_INFO("\n");
		}

		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
		if (pool->hpo_dp_link_enc_count) {
			DTN_INFO("DP HPO L_ENC:  Enabled  Mode   Lanes   Stream  Slots   VC Rate X    VC Rate Y\n");

			for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
				struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
				struct hpo_dp_link_enc_state hpo_dp_le_state = {0};

				if (hpo_dp_link_enc->funcs->read_state) {
					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
					DTN_INFO("[%d]:                 %d  %6s     %d        %d      %d     %d     %d\n",
							hpo_dp_link_enc->inst,
							hpo_dp_le_state.link_enc_enabled,
							(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
									(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
									(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
							hpo_dp_le_state.lane_count,
							hpo_dp_le_state.stream_src[0],
							hpo_dp_le_state.slot_count[0],
							hpo_dp_le_state.vc_rate_x[0],
							hpo_dp_le_state.vc_rate_y[0]);
					DTN_INFO("\n");
				}
			}

			DTN_INFO("\n");
		}
	}

	DTN_INFO_END();
}
595 
dcn10_did_underflow_occur(struct dc * dc,struct pipe_ctx * pipe_ctx)596 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
597 {
598 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
599 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
600 
601 	if (tg->funcs->is_optc_underflow_occurred(tg)) {
602 		tg->funcs->clear_optc_underflow(tg);
603 		return true;
604 	}
605 
606 	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
607 		hubp->funcs->hubp_clear_underflow(hubp);
608 		return true;
609 	}
610 	return false;
611 }
612 
/*
 * dcn10_enable_power_gating_plane - Allow or forbid plane power gating.
 * @hws: hardware sequencer with the register map for this ASIC
 * @enable: true to allow the PGFSM to gate, false to force domains on
 *
 * Writes the FORCEON bit of every HUBP and DPP power domain; FORCEON is
 * the logical inverse of "power gating enabled".
 */
void dcn10_enable_power_gating_plane(
	struct dce_hwseq *hws,
	bool enable)
{
	bool force_on = !enable; /* forcing on disables power gating */

	/* DCHUBP0/1/2/3 */
	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);

	/* DPP0/1/2/3 */
	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
}
634 
dcn10_disable_vga(struct dce_hwseq * hws)635 void dcn10_disable_vga(
636 	struct dce_hwseq *hws)
637 {
638 	unsigned int in_vga1_mode = 0;
639 	unsigned int in_vga2_mode = 0;
640 	unsigned int in_vga3_mode = 0;
641 	unsigned int in_vga4_mode = 0;
642 
643 	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
644 	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
645 	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
646 	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
647 
648 	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
649 			in_vga3_mode == 0 && in_vga4_mode == 0)
650 		return;
651 
652 	REG_WRITE(D1VGA_CONTROL, 0);
653 	REG_WRITE(D2VGA_CONTROL, 0);
654 	REG_WRITE(D3VGA_CONTROL, 0);
655 	REG_WRITE(D4VGA_CONTROL, 0);
656 
657 	/* HW Engineer's Notes:
658 	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
659 	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
660 	 *
661 	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
662 	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
663 	 */
664 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
665 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
666 }
667 
/**
 * dcn10_dpp_pg_control - DPP power gate control.
 *
 * @hws: dce_hwseq reference.
 * @dpp_inst: DPP instance reference.
 * @power_on: true to power the DPP up (clear the gate), false to
 *            power-gate it. Note: power_on == true writes POWER_GATE = 0.
 *
 * Programs the power-gate bit of the given DPP's domain and polls the
 * domain's PGFSM status until it reports the requested power state.
 * No-ops when DPP power gating is disabled via debug option or when the
 * DOMAIN1_PG_CONFIG register does not exist on this ASIC.
 */
void dcn10_dpp_pg_control(
		struct dce_hwseq *hws,
		unsigned int dpp_inst,
		bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;

	if (hws->ctx->dc->debug.disable_dpp_power_gate)
		return;
	/* Register offset 0 means the domain registers are absent. */
	if (REG(DOMAIN1_PG_CONFIG) == 0)
		return;

	/* DPP instances map to the odd-numbered power domains. */
	switch (dpp_inst) {
	case 0: /* DPP0 */
		REG_UPDATE(DOMAIN1_PG_CONFIG,
				DOMAIN1_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN1_PG_STATUS,
				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DPP1 */
		REG_UPDATE(DOMAIN3_PG_CONFIG,
				DOMAIN3_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN3_PG_STATUS,
				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DPP2 */
		REG_UPDATE(DOMAIN5_PG_CONFIG,
				DOMAIN5_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN5_PG_STATUS,
				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DPP3 */
		REG_UPDATE(DOMAIN7_PG_CONFIG,
				DOMAIN7_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN7_PG_STATUS,
				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}
728 
/**
 * dcn10_hubp_pg_control - HUBP power gate control.
 *
 * @hws: dce_hwseq reference.
 * @hubp_inst: HUBP instance reference.
 * @power_on: true to power the HUBP up (clear the gate), false to
 *            power-gate it. Note: power_on == true writes POWER_GATE = 0.
 *
 * Programs the power-gate bit of the given HUBP's domain and polls the
 * domain's PGFSM status until it reports the requested power state.
 * No-ops when HUBP power gating is disabled via debug option or when the
 * DOMAIN0_PG_CONFIG register does not exist on this ASIC.
 */
void dcn10_hubp_pg_control(
		struct dce_hwseq *hws,
		unsigned int hubp_inst,
		bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;

	if (hws->ctx->dc->debug.disable_hubp_power_gate)
		return;
	/* Register offset 0 means the domain registers are absent. */
	if (REG(DOMAIN0_PG_CONFIG) == 0)
		return;

	/* HUBP instances map to the even-numbered power domains. */
	switch (hubp_inst) {
	case 0: /* DCHUBP0 */
		REG_UPDATE(DOMAIN0_PG_CONFIG,
				DOMAIN0_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN0_PG_STATUS,
				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DCHUBP1 */
		REG_UPDATE(DOMAIN2_PG_CONFIG,
				DOMAIN2_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN2_PG_STATUS,
				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DCHUBP2 */
		REG_UPDATE(DOMAIN4_PG_CONFIG,
				DOMAIN4_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN4_PG_STATUS,
				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DCHUBP3 */
		REG_UPDATE(DOMAIN6_PG_CONFIG,
				DOMAIN6_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN6_PG_STATUS,
				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}
789 
/*
 * power_on_plane_resources - Ungate the front-end resources for one plane.
 * @hws: hardware sequencer
 * @plane_id: index of the DPP/HUBP pair to power up
 *
 * Enables the DPP root clock (when the hook exists), then brackets the
 * DPP/HUBP power-up inside an IP_REQUEST_EN = 1 .. 0 window, as required
 * for PGFSM programming. Skipped entirely when DC_IP_REQUEST_CNTL is not
 * present in the register map.
 */
static void power_on_plane_resources(
	struct dce_hwseq *hws,
	int plane_id)
{
	DC_LOGGER_INIT(hws->ctx->logger);

	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, plane_id, true);

	if (REG(DC_IP_REQUEST_CNTL)) {
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, plane_id, true);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, plane_id, true);

		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Un-gated front end for pipe %d\n", plane_id);
	}
}
815 
/*
 * undo_DEGVIDCN10_253_wa - Revert the DEGVIDCN10_253 stutter workaround.
 *
 * If apply_DEGVIDCN10_253_wa() previously un-gated HUBP0 to enable
 * stutter, blank HUBP0 again and power-gate it back (inside the
 * IP_REQUEST_EN bracket), then clear the applied flag. No-op otherwise.
 */
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];

	if (!hws->wa_state.DEGVIDCN10_253_applied)
		return;

	hubp->funcs->set_blank(hubp, true);

	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	/* power_on = false: re-gate HUBP0. */
	hws->funcs.hubp_pg_control(hws, 0, false);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hws->wa_state.DEGVIDCN10_253_applied = false;
}
835 
apply_DEGVIDCN10_253_wa(struct dc * dc)836 static void apply_DEGVIDCN10_253_wa(struct dc *dc)
837 {
838 	struct dce_hwseq *hws = dc->hwseq;
839 	struct hubp *hubp = dc->res_pool->hubps[0];
840 	int i;
841 
842 	if (dc->debug.disable_stutter)
843 		return;
844 
845 	if (!hws->wa.DEGVIDCN10_253)
846 		return;
847 
848 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
849 		if (!dc->res_pool->hubps[i]->power_gated)
850 			return;
851 	}
852 
853 	/* all pipe power gated, apply work around to enable stutter. */
854 
855 	REG_SET(DC_IP_REQUEST_CNTL, 0,
856 			IP_REQUEST_EN, 1);
857 
858 	hws->funcs.hubp_pg_control(hws, 0, true);
859 	REG_SET(DC_IP_REQUEST_CNTL, 0,
860 			IP_REQUEST_EN, 0);
861 
862 	hubp->funcs->set_hubp_blank_en(hubp, false);
863 	hws->wa_state.DEGVIDCN10_253_applied = true;
864 }
865 
/*
 * dcn10_bios_golden_init - Run the VBIOS golden init for DCN and restore
 * the HUBBUB allow-self-refresh state that the command table may clobber.
 */
void dcn10_bios_golden_init(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *bp = dc->ctx->dc_bios;
	int i;
	bool allow_self_fresh_force_enable = true;

	/* Some platforms handle golden init entirely via an S0i3-specific WA. */
	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
		return;

	/* Snapshot allow-self-refresh before the command table runs. */
	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
		allow_self_fresh_force_enable =
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);


	/* WA for making DF sleep when idle after resume from S0i3.
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
	 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
	 * before calling command table and it changed to 1 after,
	 * it should be set back to 0.
	 */

	/* initialize dcn global */
	bp->funcs->enable_disp_power_gating(bp,
			CONTROLLER_ID_D0, ASIC_PIPE_INIT);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		/* initialize dcn per pipe */
		bp->funcs->enable_disp_power_gating(bp,
				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
	}

	/* If the command table flipped the bit from 0 to 1, restore it per
	 * the WA above (unless stutter is disabled by debug option).
	 */
	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
		if (allow_self_fresh_force_enable == false &&
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

}
905 
false_optc_underflow_wa(struct dc * dc,const struct dc_stream_state * stream,struct timing_generator * tg)906 static void false_optc_underflow_wa(
907 		struct dc *dc,
908 		const struct dc_stream_state *stream,
909 		struct timing_generator *tg)
910 {
911 	int i;
912 	bool underflow;
913 
914 	if (!dc->hwseq->wa.false_optc_underflow)
915 		return;
916 
917 	underflow = tg->funcs->is_optc_underflow_occurred(tg);
918 
919 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
920 		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
921 
922 		if (old_pipe_ctx->stream != stream)
923 			continue;
924 
925 		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
926 	}
927 
928 	if (tg->funcs->set_blank_data_double_buffer)
929 		tg->funcs->set_blank_data_double_buffer(tg, true);
930 
931 	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
932 		tg->funcs->clear_optc_underflow(tg);
933 }
934 
calculate_vready_offset_for_group(struct pipe_ctx * pipe)935 static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
936 {
937 	struct pipe_ctx *other_pipe;
938 	int vready_offset = pipe->pipe_dlg_param.vready_offset;
939 
940 	/* Always use the largest vready_offset of all connected pipes */
941 	for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
942 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
943 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
944 	}
945 	for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
946 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
947 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
948 	}
949 	for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
950 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
951 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
952 	}
953 	for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
954 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
955 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
956 	}
957 
958 	return vready_offset;
959 }
960 
/*
 * dcn10_enable_stream_timing - Program and start the OTG for a stream.
 *
 * Programs the pixel clock, OTG timing and blank color, then enables the
 * CRTC.  Runs only for the top pipe of a blending tree; children share the
 * back end already programmed here.
 *
 * Return: DC_OK on success, DC_ERROR_UNEXPECTED if the pixel clock could
 * not be programmed or the CRTC could not be enabled.
 */
enum dc_status dcn10_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};

	/* by upper caller loop, pipe0 is parent pipe and be called first.
	 * back end is set up by for pipe0. Other children pipe share back end
	 * with pipe 0. No program is needed.
	 */
	if (pipe_ctx->top_pipe != NULL)
		return DC_OK;

	/* TODO check if timing_changed, disable stream if timing changed */

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* Track symclk/PHY state for HDMI so later power management knows
	 * the OTG holds a reference on symclk.
	 */
	if (dc_is_hdmi_tmds_signal(stream->signal)) {
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
		else
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
	}

	/* Use the group-wide max vready offset so combined pipes agree. */
	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			&stream->timing,
			calculate_vready_offset_for_group(pipe_ctx),
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->pipe_dlg_param.pstate_keepout,
			pipe_ctx->stream->signal,
			true);

#if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM. etc. should be done here. */
	/* or FPGA now. instance 0 only. TODO: move to opp.c */

	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;

	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
				pipe_ctx->stream_res.opp,
				&stream->bit_depth_params,
				&stream->clamping);
#endif
	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;

	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
				pipe_ctx->stream_res.tg,
				&black_color);

	/* Blank the OTG before enabling the CRTC, and run the false-underflow
	 * workaround while blanked.
	 */
	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
	}

	/* VTG is  within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	return DC_OK;
}
1063 
/*
 * dcn10_reset_back_end_for_pipe - Tear down the back end (link, audio,
 * OTG) for a pipe being removed from the context.  Only the parent pipe
 * (top_pipe == NULL) actually disables the shared OTG/back end.
 */
static void dcn10_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	int i;
	struct dc_link *link;
	DC_LOGGER_INIT(dc->ctx->logger);
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	link = pipe_ctx->stream->link;
	/* DPMS may already disable or */
	/* dpms_off status is incorrect due to fastboot
	 * feature. When system resume from S4 with second
	 * screen only, the dpms_off would be true but
	 * VBIOS lit up eDP, so check link status too.
	 */
	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
		dc->link_srv->set_dpms_off(pipe_ctx);
	else if (pipe_ctx->stream_res.audio)
		dc->hwss.disable_audio_stream(pipe_ctx);

	if (pipe_ctx->stream_res.audio) {
		/*disable az_endpoint*/
		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

		/*free audio*/
		if (dc->caps.dynamic_audio == true) {
			/*we have to dynamic arbitrate the audio endpoints*/
			/*we free the resource, need reset is_audio_acquired*/
			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
					pipe_ctx->stream_res.audio, false);
			pipe_ctx->stream_res.audio = NULL;
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		if (pipe_ctx->stream_res.abm)
			dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		if (pipe_ctx->stream_res.tg->funcs->set_drr)
			pipe_ctx->stream_res.tg->funcs->set_drr(
					pipe_ctx->stream_res.tg, NULL);
		/* Drop the OTG's symclk reference taken at stream enable. */
		if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
			pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
	}

	/* Only clear the stream pointer if this pipe belongs to current_state. */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
			break;

	if (i == dc->res_pool->pipe_count)
		return;

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
1133 
dcn10_hw_wa_force_recovery(struct dc * dc)1134 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
1135 {
1136 	struct hubp *hubp ;
1137 	unsigned int i;
1138 
1139 	if (!dc->debug.recovery_enabled)
1140 		return false;
1141 	/*
1142 	DCHUBP_CNTL:HUBP_BLANK_EN=1
1143 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1144 	DCHUBP_CNTL:HUBP_DISABLE=1
1145 	DCHUBP_CNTL:HUBP_DISABLE=0
1146 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1147 	DCSURF_PRIMARY_SURFACE_ADDRESS
1148 	DCHUBP_CNTL:HUBP_BLANK_EN=0
1149 	*/
1150 
1151 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1152 		struct pipe_ctx *pipe_ctx =
1153 			&dc->current_state->res_ctx.pipe_ctx[i];
1154 		if (pipe_ctx != NULL) {
1155 			hubp = pipe_ctx->plane_res.hubp;
1156 			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1157 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1158 				hubp->funcs->set_hubp_blank_en(hubp, true);
1159 		}
1160 	}
1161 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1162 	hubbub1_soft_reset(dc->res_pool->hubbub, true);
1163 
1164 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1165 		struct pipe_ctx *pipe_ctx =
1166 			&dc->current_state->res_ctx.pipe_ctx[i];
1167 		if (pipe_ctx != NULL) {
1168 			hubp = pipe_ctx->plane_res.hubp;
1169 			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
1170 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1171 				hubp->funcs->hubp_disable_control(hubp, true);
1172 		}
1173 	}
1174 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1175 		struct pipe_ctx *pipe_ctx =
1176 			&dc->current_state->res_ctx.pipe_ctx[i];
1177 		if (pipe_ctx != NULL) {
1178 			hubp = pipe_ctx->plane_res.hubp;
1179 			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
1180 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1181 				hubp->funcs->hubp_disable_control(hubp, true);
1182 		}
1183 	}
1184 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1185 	hubbub1_soft_reset(dc->res_pool->hubbub, false);
1186 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1187 		struct pipe_ctx *pipe_ctx =
1188 			&dc->current_state->res_ctx.pipe_ctx[i];
1189 		if (pipe_ctx != NULL) {
1190 			hubp = pipe_ctx->plane_res.hubp;
1191 			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1192 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1193 				hubp->funcs->set_hubp_blank_en(hubp, true);
1194 		}
1195 	}
1196 	return true;
1197 
1198 }
1199 
/*
 * dcn10_verify_allow_pstate_change_high - Sanity check that DCHUBBUB still
 * allows p-state change; if not, attempt the forced recovery sequence and
 * re-check once.
 */
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;
	static bool should_log_hw_state; /* prevent hw state log by default */

	if (!hubbub->funcs->verify_allow_pstate_change_high)
		return;

	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
		int i = 0;

		if (should_log_hw_state)
			dcn10_log_hw_state(dc, NULL);

		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
		BREAK_TO_DEBUGGER();
		/* Try the DCHUBBUB soft-reset recovery, then verify again. */
		if (dcn10_hw_wa_force_recovery(dc)) {
			/*check again*/
			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
				BREAK_TO_DEBUGGER();
		}
	}
}
1223 
/* trigger HW to start disconnect plane from stream on the next vsync:
 * remove the pipe's MPCC from the OPP's blending tree and disconnect the
 * HUBP; the actual teardown completes at the following vsync.
 */
void dcn10_plane_atomic_disconnect(struct dc *dc,
		struct dc_state *state,
		struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	int dpp_id = pipe_ctx->plane_res.dpp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params;
	struct mpcc *mpcc_to_remove = NULL;
	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;

	mpc_tree_params = &(opp->mpc_tree_params);
	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);

	/*Already reset*/
	if (mpcc_to_remove == NULL)
		return;

	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
	// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
	// so don't wait for MPCC_IDLE in the programming sequence
	if (dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM)
		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;

	/* The disconnect completes asynchronously; later optimization passes
	 * must wait for it.
	 */
	dc->optimized_required = true;

	if (hubp->funcs->hubp_disconnect)
		hubp->funcs->hubp_disconnect(hubp);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
1258 
1259 /**
1260  * dcn10_plane_atomic_power_down - Power down plane components.
1261  *
1262  * @dc: dc struct reference. used for grab hwseq.
1263  * @dpp: dpp struct reference.
1264  * @hubp: hubp struct reference.
1265  *
1266  * Keep in mind that this operation requires a power gate configuration;
1267  * however, requests for switch power gate are precisely controlled to avoid
1268  * problems. For this reason, power gate request is usually disabled. This
1269  * function first needs to enable the power gate request before disabling DPP
1270  * and HUBP. Finally, it disables the power gate request again.
1271  */
dcn10_plane_atomic_power_down(struct dc * dc,struct dpp * dpp,struct hubp * hubp)1272 void dcn10_plane_atomic_power_down(struct dc *dc,
1273 		struct dpp *dpp,
1274 		struct hubp *hubp)
1275 {
1276 	struct dce_hwseq *hws = dc->hwseq;
1277 	DC_LOGGER_INIT(dc->ctx->logger);
1278 
1279 	if (REG(DC_IP_REQUEST_CNTL)) {
1280 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1281 				IP_REQUEST_EN, 1);
1282 
1283 		if (hws->funcs.dpp_pg_control)
1284 			hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1285 
1286 		if (hws->funcs.hubp_pg_control)
1287 			hws->funcs.hubp_pg_control(hws, hubp->inst, false);
1288 
1289 		dpp->funcs->dpp_reset(dpp);
1290 
1291 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1292 				IP_REQUEST_EN, 0);
1293 		DC_LOG_DEBUG(
1294 				"Power gated front end %d\n", hubp->inst);
1295 	}
1296 
1297 	if (hws->funcs.dpp_root_clock_control)
1298 		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
1299 }
1300 
/* disable HW used by plane.
 * note:  cannot disable until disconnect is complete
 * (waits for the MPCC disconnect triggered by plane_atomic_disconnect,
 * then stops clocks, power gates the front end and clears the pipe's
 * resource bindings).
 */
void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	int opp_id = hubp->opp_id;

	/* MPCC must be fully disconnected before clocks can be stopped. */
	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);

	/* Gate the OPP pipe clock only if no MPCC remains attached to it. */
	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
				pipe_ctx->stream_res.opp,
				false);

	hubp->power_gated = true;
	dc->optimized_required = false; /* We're powering off, no need to optimize */

	hws->funcs.plane_atomic_power_down(dc,
			pipe_ctx->plane_res.dpp,
			pipe_ctx->plane_res.hubp);

	/* Clear the pipe's resource bindings now that the HW is down. */
	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
}
1336 
dcn10_disable_plane(struct dc * dc,struct dc_state * state,struct pipe_ctx * pipe_ctx)1337 void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
1338 {
1339 	struct dce_hwseq *hws = dc->hwseq;
1340 	DC_LOGGER_INIT(dc->ctx->logger);
1341 
1342 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1343 		return;
1344 
1345 	hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1346 
1347 	apply_DEGVIDCN10_253_wa(dc);
1348 
1349 	DC_LOG_DC("Power down front end %d\n",
1350 					pipe_ctx->pipe_idx);
1351 }
1352 
/*
 * dcn10_init_pipes - Bring all pipes into a known, disabled state at init.
 *
 * Blanks and unwires every pipe that is not carrying a seamless-boot
 * stream: resets DET allocation and MPC muxes, disconnects and power gates
 * the per-pipe front ends, then power gates any DSC not in use by a
 * currently running OTG.
 */
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool can_apply_seamless_boot = false;
	bool tg_enabled[MAX_PIPES] = {false};

	/* Seamless boot means VBIOS-lit pipes must be left untouched. */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* Reset det size */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];

		/* Do not need to reset for seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		if (hubbub && hubp) {
			if (hubbub->funcs->program_det_size)
				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
			if (hubbub->funcs->program_det_segments)
				hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0);
		}
	}

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;
			tg_enabled[i] = true;
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		dpp->funcs->dpp_reset(dpp);

		/* Build a minimal pipe_ctx so the generic disconnect/disable
		 * paths can operate on this front end.
		 */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, context, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		if (tg->funcs->is_tg_enabled(tg)) {
			if (tg->funcs->init_odm)
				tg->funcs->init_odm(tg);
		}

		tg->funcs->tg_init(tg);
	}

	/* Clean up MPC tree */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (tg_enabled[i]) {
			if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) {
				if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) {
					int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id;

					/* Drop stale bottom links into pipes that were torn down above. */
					if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id]))
						dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
				}
			}
		}
	}

	/* Power gate DSCs */
	if (hws->funcs.dsc_pg_control != NULL) {
		uint32_t num_opps = 0;
		uint32_t opp_id_src0 = OPP_ID_INVALID;
		uint32_t opp_id_src1 = OPP_ID_INVALID;

		// Step 1: To find out which OPTC is running & OPTC DSC is ON
		// We can't use res_pool->res_cap->num_timing_generator to check
		// Because it records display pipes default setting built in driver,
		// not display pipes of the current chip.
		// Some ASICs would be fused display pipes less than the default setting.
		// In dcnxx_resource_construct function, driver would obatin real information.
		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
			uint32_t optc_dsc_state = 0;
			struct timing_generator *tg = dc->res_pool->timing_generators[i];

			if (tg->funcs->is_tg_enabled(tg)) {
				if (tg->funcs->get_dsc_status)
					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
				// Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
				// non-zero value is DSC enabled
				if (optc_dsc_state != 0) {
					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
					break;
				}
			}
		}

		// Step 2: To power down DSC but skip DSC  of running OPTC
		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
			struct dcn_dsc_state s  = {0};

			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
				s.dsc_clock_en && s.dsc_fw_en)
				continue;

			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
		}
	}
}
1541 
/*
 * dcn10_init_hw - One-time hardware init for DCN10 at driver load/resume.
 *
 * Initializes clocks, DCCG, BIOS golden settings and reference clocks,
 * brings up link encoders, audio, ABM/DMCU and clock gating, and powers
 * down unused pipes unless a seamless boot hand-off is in progress.
 */
void dcn10_init_hw(struct dc *dc)
{
	int i;
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
	bool   is_optimized_init_done = false;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	/* Align bw context with hw config when system resume. */
	if (dc->clk_mgr && dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
	}

	// Initialize the dccg
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	if (!dcb->funcs->is_accelerated_mode(dcb))
		hws->funcs.disable_vga(dc->hwseq);

	if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv))
		hws->funcs.bios_golden_init(dc);


	/* Derive DCCG/DCHUB reference clocks from the BIOS crystal frequency. */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->dccg && res_pool->hubbub) {

			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (!is_optimized_init_done)
			link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		if (!is_optimized_init_done) {
			hws->funcs.init_pipes(dc, dc->current_state);
			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
		}
	}

	if (!is_optimized_init_done) {

		for (i = 0; i < res_pool->audio_count; i++) {
			struct audio *audio = res_pool->audios[i];

			audio->funcs->hw_init(audio);
		}

		/* Pick up backlight state from the panel control registers. */
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->panel_cntl) {
				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
				user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
			}
		}

		if (abm != NULL)
			abm->funcs->abm_init(abm, backlight, user_level);

		if (dmcu != NULL && !dmcu->auto_load_dmcu)
			dmcu->funcs->dmcu_init(dmcu);
	}

	if (abm != NULL && dmcu != NULL)
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (!is_optimized_init_done)
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
1681 
1682 /* In headless boot cases, DIG may be turned
1683  * on which causes HW/SW discrepancies.
1684  * To avoid this, power down hardware on boot
1685  * if DIG is turned on
1686  */
dcn10_power_down_on_boot(struct dc * dc)1687 void dcn10_power_down_on_boot(struct dc *dc)
1688 {
1689 	struct dc_link *edp_links[MAX_NUM_EDP];
1690 	struct dc_link *edp_link = NULL;
1691 	int edp_num;
1692 	int i = 0;
1693 
1694 	dc_get_edp_links(dc, edp_links, &edp_num);
1695 	if (edp_num)
1696 		edp_link = edp_links[0];
1697 
1698 	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
1699 			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1700 			dc->hwseq->funcs.edp_backlight_control &&
1701 			dc->hwseq->funcs.power_down &&
1702 			dc->hwss.edp_power_control) {
1703 		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1704 		dc->hwseq->funcs.power_down(dc);
1705 		dc->hwss.edp_power_control(edp_link, false);
1706 	} else {
1707 		for (i = 0; i < dc->link_count; i++) {
1708 			struct dc_link *link = dc->links[i];
1709 
1710 			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
1711 					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1712 					dc->hwseq->funcs.power_down) {
1713 				dc->hwseq->funcs.power_down(dc);
1714 				break;
1715 			}
1716 
1717 		}
1718 	}
1719 
1720 	/*
1721 	 * Call update_clocks with empty context
1722 	 * to send DISPLAY_OFF
1723 	 * Otherwise DISPLAY_OFF may not be asserted
1724 	 */
1725 	if (dc->clk_mgr->funcs->set_low_power_state)
1726 		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
1727 }
1728 
/* Reset back-end (OTG/stream) resources for every top pipe whose stream
 * is removed or must be reprogrammed in the new state, then gate the
 * stream resources and power down the pipe's old clock source.
 */
void dcn10_reset_hw_ctx_wrap(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;

	/* Reset Back End*/
	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Nothing programmed on this pipe in the current state. */
		if (!pipe_ctx_old->stream)
			continue;

		/* Secondary pipes share their top pipe's back end. */
		if (pipe_ctx_old->top_pipe)
			continue;

		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			/* Save the clock source before the reset tears down
			 * the pipe context, so it can be powered down after.
			 */
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
			if (old_clk)
				old_clk->funcs->cs_power_down(old_clk);
		}
	}
}
1760 
patch_address_for_sbs_tb_stereo(struct pipe_ctx * pipe_ctx,PHYSICAL_ADDRESS_LOC * addr)1761 static bool patch_address_for_sbs_tb_stereo(
1762 		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1763 {
1764 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1765 	bool sec_split = pipe_ctx->top_pipe &&
1766 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1767 	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1768 		(pipe_ctx->stream->timing.timing_3d_format ==
1769 		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1770 		 pipe_ctx->stream->timing.timing_3d_format ==
1771 		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1772 		*addr = plane_state->address.grph_stereo.left_addr;
1773 		plane_state->address.grph_stereo.left_addr =
1774 		plane_state->address.grph_stereo.right_addr;
1775 		return true;
1776 	} else {
1777 		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1778 			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1779 			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1780 			plane_state->address.grph_stereo.right_addr =
1781 			plane_state->address.grph_stereo.left_addr;
1782 			plane_state->address.grph_stereo.right_meta_addr =
1783 			plane_state->address.grph_stereo.left_meta_addr;
1784 		}
1785 	}
1786 	return false;
1787 }
1788 
dcn10_update_plane_addr(const struct dc * dc,struct pipe_ctx * pipe_ctx)1789 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1790 {
1791 	bool addr_patched = false;
1792 	PHYSICAL_ADDRESS_LOC addr;
1793 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1794 
1795 	if (plane_state == NULL)
1796 		return;
1797 
1798 	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1799 
1800 	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1801 			pipe_ctx->plane_res.hubp,
1802 			&plane_state->address,
1803 			plane_state->flip_immediate);
1804 
1805 	plane_state->status.requested_address = plane_state->address;
1806 
1807 	if (plane_state->flip_immediate)
1808 		plane_state->status.current_address = plane_state->address;
1809 
1810 	if (addr_patched)
1811 		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1812 }
1813 
dcn10_set_input_transfer_func(struct dc * dc,struct pipe_ctx * pipe_ctx,const struct dc_plane_state * plane_state)1814 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1815 			const struct dc_plane_state *plane_state)
1816 {
1817 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1818 	const struct dc_transfer_func *tf = NULL;
1819 	bool result = true;
1820 
1821 	if (dpp_base == NULL)
1822 		return false;
1823 
1824 	tf = &plane_state->in_transfer_func;
1825 
1826 	if (!dpp_base->ctx->dc->debug.always_use_regamma
1827 		&& !plane_state->gamma_correction.is_identity
1828 			&& dce_use_lut(plane_state->format))
1829 		dpp_base->funcs->dpp_program_input_lut(dpp_base, &plane_state->gamma_correction);
1830 
1831 	if (tf->type == TF_TYPE_PREDEFINED) {
1832 		switch (tf->tf) {
1833 		case TRANSFER_FUNCTION_SRGB:
1834 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1835 			break;
1836 		case TRANSFER_FUNCTION_BT709:
1837 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1838 			break;
1839 		case TRANSFER_FUNCTION_LINEAR:
1840 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1841 			break;
1842 		case TRANSFER_FUNCTION_PQ:
1843 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1844 			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1845 			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1846 			result = true;
1847 			break;
1848 		default:
1849 			result = false;
1850 			break;
1851 		}
1852 	} else if (tf->type == TF_TYPE_BYPASS) {
1853 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1854 	} else {
1855 		cm_helper_translate_curve_to_degamma_hw_format(tf,
1856 					&dpp_base->degamma_params);
1857 		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1858 				&dpp_base->degamma_params);
1859 		result = true;
1860 	}
1861 
1862 	return result;
1863 }
1864 
1865 #define MAX_NUM_HW_POINTS 0x200
1866 
log_tf(struct dc_context * ctx,const struct dc_transfer_func * tf,uint32_t hw_points_num)1867 static void log_tf(struct dc_context *ctx,
1868 				const struct dc_transfer_func *tf, uint32_t hw_points_num)
1869 {
1870 	// DC_LOG_GAMMA is default logging of all hw points
1871 	// DC_LOG_ALL_GAMMA logs all points, not only hw points
1872 	// DC_LOG_ALL_TF_POINTS logs all channels of the tf
1873 	int i = 0;
1874 
1875 	DC_LOG_GAMMA("Gamma Correction TF");
1876 	DC_LOG_ALL_GAMMA("Logging all tf points...");
1877 	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1878 
1879 	for (i = 0; i < hw_points_num; i++) {
1880 		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1881 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1882 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1883 	}
1884 
1885 	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1886 		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1887 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1888 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1889 	}
1890 }
1891 
dcn10_set_output_transfer_func(struct dc * dc,struct pipe_ctx * pipe_ctx,const struct dc_stream_state * stream)1892 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1893 				const struct dc_stream_state *stream)
1894 {
1895 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1896 
1897 	if (!stream)
1898 		return false;
1899 
1900 	if (dpp == NULL)
1901 		return false;
1902 
1903 	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1904 
1905 	if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED &&
1906 	    stream->out_transfer_func.tf == TRANSFER_FUNCTION_SRGB)
1907 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1908 
1909 	/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
1910 	 * update.
1911 	 */
1912 	else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
1913 			&stream->out_transfer_func,
1914 			&dpp->regamma_params, false)) {
1915 		dpp->funcs->dpp_program_regamma_pwl(
1916 				dpp,
1917 				&dpp->regamma_params, OPP_REGAMMA_USER);
1918 	} else
1919 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1920 
1921 	if (stream->ctx) {
1922 		log_tf(stream->ctx,
1923 				&stream->out_transfer_func,
1924 				dpp->regamma_params.hw_points_num);
1925 	}
1926 
1927 	return true;
1928 }
1929 
/* Lock or unlock register updates for a pipe via the TG master update
 * lock. The lock covers every pipe on the timing generator, so only the
 * top pipe of a tree needs to take it.
 */
void dcn10_pipe_control_lock(
	struct dc *dc,
	struct pipe_ctx *pipe,
	bool lock)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct timing_generator *tg;

	if (!pipe || pipe->top_pipe)
		return;

	tg = pipe->stream_res.tg;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (lock)
		tg->funcs->lock(tg);
	else
		tg->funcs->unlock(tg);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
1954 
/**
 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
 *
 * Software keepout workaround to prevent cursor update locking from stalling
 * out cursor updates indefinitely or from old values from being retained in
 * the case where the viewport changes in the same frame as the cursor.
 *
 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
 * too close to VUPDATE, then stall out until VUPDATE finishes.
 *
 * TODO: Optimize cursor programming to be once per frame before VUPDATE
 *       to avoid the need for this workaround.
 *
 * @dc: Current DC state
 * @pipe_ctx: Pipe_ctx pointer for delayed cursor update
 *
 * Return: void
 */
static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct crtc_position position;
	uint32_t vupdate_start, vupdate_end;
	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
	unsigned int us_per_line, us_vupdate;

	/* Hooks needed for the calculation may be absent on some ASICs. */
	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
		return;

	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
		return;

	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
				       &vupdate_end);

	dc->hwss.get_position(&pipe_ctx, 1, &position);
	vpos = position.vertical_count;

	/* Avoid wraparound calculation issues */
	vupdate_start += stream->timing.v_total;
	vupdate_end += stream->timing.v_total;
	vpos += stream->timing.v_total;

	if (vpos <= vupdate_start) {
		/* VPOS is in VACTIVE or back porch. */
		lines_to_vupdate = vupdate_start - vpos;
	} else if (vpos > vupdate_end) {
		/* VPOS is in the front porch. */
		return;
	} else {
		/* VPOS is in VUPDATE. */
		lines_to_vupdate = 0;
	}

	/* Calculate time until VUPDATE in microseconds. */
	us_per_line =
		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
	us_to_vupdate = lines_to_vupdate * us_per_line;

	/* 70 us is a conservative estimate of cursor update time*/
	if (us_to_vupdate > 70)
		return;

	/* Stall out until the cursor update completes. */
	if (vupdate_end < vupdate_start)
		vupdate_end += stream->timing.v_total;
	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
	udelay(us_to_vupdate + us_vupdate);
}
2024 
/* Take or release the cursor update lock for a pipe's MPCC tree, using
 * the DMUB HW lock manager when the link requires it, otherwise the MPC
 * cursor lock.
 */
void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
{
	/* Cursor lock is per MPCC tree; only the top pipe of a stream
	 * needs to take it.
	 */
	if (!pipe || pipe->top_pipe)
		return;

	/* Prevent the lock itself from stalling out cursor updates. */
	if (lock)
		delay_cursor_until_vupdate(dc, pipe);

	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
		union dmub_hw_lock_flags hw_locks = { 0 };
		struct dmub_hw_lock_inst_flags inst_flags = { 0 };

		hw_locks.bits.lock_cursor = 1;
		inst_flags.opp_inst = pipe->stream_res.opp->inst;

		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, lock,
				&hw_locks, &inst_flags);
	} else {
		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
				pipe->stream_res.opp->inst, lock);
	}
}
2050 
wait_for_reset_trigger_to_occur(struct dc_context * dc_ctx,struct timing_generator * tg)2051 static bool wait_for_reset_trigger_to_occur(
2052 	struct dc_context *dc_ctx,
2053 	struct timing_generator *tg)
2054 {
2055 	bool rc = false;
2056 
2057 	DC_LOGGER_INIT(dc_ctx->logger);
2058 
2059 	/* To avoid endless loop we wait at most
2060 	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
2061 	const uint32_t frames_to_wait_on_triggered_reset = 10;
2062 	int i;
2063 
2064 	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
2065 
2066 		if (!tg->funcs->is_counter_moving(tg)) {
2067 			DC_ERROR("TG counter is not moving!\n");
2068 			break;
2069 		}
2070 
2071 		if (tg->funcs->did_triggered_reset_occur(tg)) {
2072 			rc = true;
2073 			/* usually occurs at i=1 */
2074 			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
2075 					i);
2076 			break;
2077 		}
2078 
2079 		/* Wait for one frame. */
2080 		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
2081 		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
2082 	}
2083 
2084 	if (false == rc)
2085 		DC_ERROR("GSL: Timeout on reset trigger!\n");
2086 
2087 	return rc;
2088 }
2089 
/* Reduce a numerator/denominator pair by dividing out their common prime
 * factors (primes below 1000 only, so larger common primes remain).
 *
 * If @checkUint32Boundary is true, stop as soon as both values fit in
 * 32 bits and return whether that was achieved; otherwise reduce through
 * the whole prime table and return true.
 *
 * Note: the function logically returns a bool; it was previously declared
 * as returning uint64_t even though it computed and returned a bool and
 * all callers test it as a boolean.
 */
static bool reduceSizeAndFraction(uint64_t *numerator,
				  uint64_t *denominator,
				  bool checkUint32Boundary)
{
	int i;
	bool ret = checkUint32Boundary == false;
	uint64_t max_int32 = 0xffffffff;
	uint64_t num, denom;
	static const uint16_t prime_numbers[] = {
		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
		941, 947, 953, 967, 971, 977, 983, 991, 997};
	int count = ARRAY_SIZE(prime_numbers);

	num = *numerator;
	denom = *denominator;
	for (i = 0; i < count; i++) {
		uint32_t num_remainder, denom_remainder;
		uint64_t num_result, denom_result;

		/* Early out once both values fit in 32 bits, if requested. */
		if (checkUint32Boundary &&
			num <= max_int32 && denom <= max_int32) {
			ret = true;
			break;
		}

		/* Divide out this prime as many times as it divides both. */
		do {
			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
			if (num_remainder == 0 && denom_remainder == 0) {
				num = num_result;
				denom = denom_result;
			}
		} while (num_remainder == 0 && denom_remainder == 0);
	}
	*numerator = num;
	*denominator = denom;
	return ret;
}
2139 
is_low_refresh_rate(struct pipe_ctx * pipe)2140 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2141 {
2142 	uint32_t master_pipe_refresh_rate =
2143 		pipe->stream->timing.pix_clk_100hz * 100 /
2144 		pipe->stream->timing.h_total /
2145 		pipe->stream->timing.v_total;
2146 	return master_pipe_refresh_rate <= 30;
2147 }
2148 
get_clock_divider(struct pipe_ctx * pipe,bool account_low_refresh_rate)2149 static uint8_t get_clock_divider(struct pipe_ctx *pipe,
2150 				 bool account_low_refresh_rate)
2151 {
2152 	uint32_t clock_divider = 1;
2153 	uint32_t numpipes = 1;
2154 
2155 	if (account_low_refresh_rate && is_low_refresh_rate(pipe))
2156 		clock_divider *= 2;
2157 
2158 	if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2159 		clock_divider *= 2;
2160 
2161 	while (pipe->next_odm_pipe) {
2162 		pipe = pipe->next_odm_pipe;
2163 		numpipes++;
2164 	}
2165 	clock_divider *= numpipes;
2166 
2167 	return clock_divider;
2168 }
2169 
/* Align the DP DTO pixel clocks of a synchronization group to an embedded
 * (eDP/LVDS-style) timing configured via
 * dc->config.vblank_alignment_dto_params, which packs pix_clk_100hz in
 * the low 32 bits, h_total in bits 32-46 and v_total in bits 48-62.
 *
 * Returns the index of the chosen master pipe, or -1 if none could be
 * determined (including on allocation failure).
 */
static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
				    struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	int i, master = -1, embedded = -1;
	struct dc_crtc_timing *hw_crtc_timing;
	uint64_t phase[MAX_PIPES];
	uint64_t modulo[MAX_PIPES];
	unsigned int pclk = 0;

	uint32_t embedded_pix_clk_100hz;
	uint16_t embedded_h_total;
	uint16_t embedded_v_total;
	uint32_t dp_ref_clk_100hz =
		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;

	DC_LOGGER_INIT(dc_ctx->logger);

	/* Timing snapshots for the whole group; freed before returning. */
	hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
	if (!hw_crtc_timing)
		return master;

	if (dc->config.vblank_alignment_dto_params &&
		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
		/* Unpack the embedded panel's timing from the packed u64. */
		embedded_h_total =
			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
		embedded_v_total =
			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
		embedded_pix_clk_100hz =
			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;

		/* First pass: read HW timings and compute per-pipe DTO
		 * phase/modulo ratios relative to the embedded timing.
		 */
		for (i = 0; i < group_size; i++) {
			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
					grouped_pipes[i]->stream_res.tg,
					&hw_crtc_timing[i]);
			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
				dc->res_pool->dp_clock_source,
				grouped_pipes[i]->stream_res.tg->inst,
				&pclk);
			hw_crtc_timing[i].pix_clk_100hz = pclk;
			if (dc_is_embedded_signal(
					grouped_pipes[i]->stream->signal)) {
				/* The embedded display is the timing master. */
				embedded = i;
				master = i;
				phase[i] = embedded_pix_clk_100hz*(uint64_t)100;
				modulo[i] = dp_ref_clk_100hz*100;
			} else {

				/* Ratio chosen so this pipe's frame rate
				 * (pclk / (h_total * v_total)) matches the
				 * embedded pipe's frame rate.
				 */
				phase[i] = (uint64_t)embedded_pix_clk_100hz*
					hw_crtc_timing[i].h_total*
					hw_crtc_timing[i].v_total;
				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
				modulo[i] = (uint64_t)dp_ref_clk_100hz*
					embedded_h_total*
					embedded_v_total;

				if (reduceSizeAndFraction(&phase[i],
						&modulo[i], true) == false) {
					/*
					 * this will help to stop reporting
					 * this timing synchronizable
					 */
					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
				}
			}
		}

		/* Second pass: program the overridden DTOs and record the
		 * resulting pixel clocks back into the stream timings.
		 */
		for (i = 0; i < group_size; i++) {
			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst,
					phase[i], modulo[i]);
				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst, &pclk);
				grouped_pipes[i]->stream->timing.pix_clk_100hz =
					pclk*get_clock_divider(grouped_pipes[i], false);
				if (master == -1)
					master = i;
			}
		}

	}

	kfree(hw_crtc_timing);
	return master;
}
2259 
/* Synchronize the vblanks of a group of pipes: align their DP DTOs to a
 * common master, then align each other TG's vblank to the master's.
 * DPG dimensions are temporarily enlarged (2*height + 1) while aligning
 * and restored afterwards.
 */
void dcn10_enable_vblanks_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width = 0, height = 0, master;

	DC_LOGGER_INIT(dc_ctx->logger);

	/* All OTGs in the group must be running before syncing; bail out
	 * (leaving DPG possibly enlarged on earlier pipes) if one is not.
	 */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Start from a clean per-stream synchronization state. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;
		grouped_pipes[i]->stream->vblank_synchronized = false;
		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
	}

	DC_SYNC_INFO("Aligning DP DTOs\n");

	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);

	DC_SYNC_INFO("Synchronizing VBlanks\n");

	/* master < 0 means no master could be selected; skip alignment. */
	if (master >= 0) {
		for (i = 0; i < group_size; i++) {
			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
				grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
					grouped_pipes[master]->stream_res.tg,
					grouped_pipes[i]->stream_res.tg,
					grouped_pipes[master]->stream->timing.pix_clk_100hz,
					grouped_pipes[i]->stream->timing.pix_clk_100hz,
					get_clock_divider(grouped_pipes[master], false),
					get_clock_divider(grouped_pipes[i], false));
			grouped_pipes[i]->stream->vblank_synchronized = true;
		}
		grouped_pipes[master]->stream->vblank_synchronized = true;
		DC_SYNC_INFO("Sync complete\n");
	}

	/* Restore the original DPG dimensions. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}
}
2324 
/* Synchronize the timing of a group of OTGs: arm a reset trigger on each
 * slave OTG sourced from grouped_pipes[0]'s OTG, wait for the trigger to
 * fire on one representative pipe, then disarm all triggers. SubVP
 * phantom pipes are skipped throughout. DPG dimensions are temporarily
 * enlarged (2*height + 1) while syncing and restored at the end.
 */
void dcn10_enable_timing_synchronization(
	struct dc *dc,
	struct dc_state *state,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width = 0, height = 0;

	DC_LOGGER_INIT(dc_ctx->logger);

	DC_SYNC_INFO("Setting up OTG reset trigger\n");

	/* All slave OTGs must be running; bail out if any is disabled. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Clear the vblank-sync flag for all real (non-phantom) streams. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;

		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream->vblank_synchronized = false;
	}

	/* Arm each slave OTG to reset off the first pipe's OTG. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
				grouped_pipes[i]->stream_res.tg,
				grouped_pipes[0]->stream_res.tg->inst);
	}

	DC_SYNC_INFO("Waiting for trigger\n");

	/* Need to get only check 1 pipe for having reset as all the others are
	 * synchronized. Look at last pipe programmed to reset.
	 */

	/* NOTE(review): this dereferences grouped_pipes[1], i.e. assumes
	 * group_size >= 2 — presumably guaranteed by callers; confirm.
	 */
	if (grouped_pipes[1]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[1]) != SUBVP_PHANTOM)
		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);

	/* Disarm the triggers now that the reset has been observed. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
				grouped_pipes[i]->stream_res.tg);
	}

	/* Restore the original DPG dimensions. */
	for (i = 1; i < group_size; i++) {
		if (dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}

	DC_SYNC_INFO("Sync complete\n");
}
2407 
dcn10_enable_per_frame_crtc_position_reset(struct dc * dc,int group_size,struct pipe_ctx * grouped_pipes[])2408 void dcn10_enable_per_frame_crtc_position_reset(
2409 	struct dc *dc,
2410 	int group_size,
2411 	struct pipe_ctx *grouped_pipes[])
2412 {
2413 	struct dc_context *dc_ctx = dc->ctx;
2414 	int i;
2415 
2416 	DC_LOGGER_INIT(dc_ctx->logger);
2417 
2418 	DC_SYNC_INFO("Setting up\n");
2419 	for (i = 0; i < group_size; i++)
2420 		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2421 			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2422 					grouped_pipes[i]->stream_res.tg,
2423 					0,
2424 					&grouped_pipes[i]->stream->triggered_crtc_reset);
2425 
2426 	DC_SYNC_INFO("Waiting for trigger\n");
2427 
2428 	for (i = 0; i < group_size; i++)
2429 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2430 
2431 	DC_SYNC_INFO("Multi-display sync is complete\n");
2432 }
2433 
/* Read the MC_VM system aperture (default physical page and low/high
 * logical bounds) for mirroring into the HUBP. The page number field is
 * shifted left by 12 and the logical LOW/HIGH fields by 18 to form byte
 * addresses.
 */
static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
		struct vm_system_aperture_param *apt,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC physical_page_number;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;

	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);

	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			LOGICAL_ADDR, &logical_addr_low);

	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			LOGICAL_ADDR, &logical_addr_high);

	/* Convert the register fields to byte addresses. */
	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
}
2457 
/* Temporary read settings, future will get values from kmd directly */
/* Read the VM context0 page-table configuration (base, start/end page
 * numbers, fault default page) from the VM registers and translate the
 * page-table base from UMA space into DCN address space.
 */
static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
		struct vm_context0_param *vm0,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC fb_base;
	PHYSICAL_ADDRESS_LOC fb_offset;
	uint32_t fb_base_value;
	uint32_t fb_offset_value;

	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);

	/* Page directory base address (hi/lo). */
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);

	/* Logical page-number range covered by the page table. */
	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);

	/* Physical page used on an L2 protection fault. */
	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);

	/*
	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
	 * Therefore we need to do
	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
	 */
	fb_base.quad_part = (uint64_t)fb_base_value << 24;
	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
	vm0->pte_base.quad_part += fb_base.quad_part;
	vm0->pte_base.quad_part -= fb_offset.quad_part;
}
2502 
2503 
/* Mirror the MMHUB system aperture and VM context0 page-table settings
 * into the given HUBP, so the HUBP uses the same aperture/page-table
 * configuration that the MMHUB registers report.
 */
static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
{
	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
	struct vm_system_aperture_param apt = {0};
	struct vm_context0_param vm0 = {0};

	/* Snapshot the current MMHUB state... */
	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);

	/* ...and program it into the HUBP. */
	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
}
2516 
/* Bring up the resources backing a plane: power on the HUBP/DPP power
 * domain, enable the DCHUB and OPP pipe clocks, optionally program VM
 * PTE settings, and arm the flip interrupt on top pipes that request it.
 */
static void dcn10_enable_plane(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Revert the DEGVIDCN10-253 workaround before enabling the plane. */
	undo_DEGVIDCN10_253_wa(dc);

	power_on_plane_resources(dc->hwseq,
		pipe_ctx->plane_res.hubp->inst);

	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	/* Only needed when the GPU VM translates display addresses. */
	if (dc->config.gpu_vm_support)
		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Arm the flip interrupt on top pipes whose plane requested it. */
	if (!pipe_ctx->top_pipe
		&& pipe_ctx->plane_state
		&& pipe_ctx->plane_state->flip_int_enabled
		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
			pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);

}
2555 
dcn10_program_gamut_remap(struct pipe_ctx * pipe_ctx)2556 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2557 {
2558 	int i = 0;
2559 	struct dpp_grph_csc_adjustment adjust;
2560 	memset(&adjust, 0, sizeof(adjust));
2561 	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2562 
2563 
2564 	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2565 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2566 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2567 			adjust.temperature_matrix[i] =
2568 				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2569 	} else if (pipe_ctx->plane_state &&
2570 		   pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2571 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2572 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2573 			adjust.temperature_matrix[i] =
2574 				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2575 	}
2576 
2577 	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2578 }
2579 
2580 
dcn10_is_rear_mpo_fix_required(struct pipe_ctx * pipe_ctx,enum dc_color_space colorspace)2581 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2582 {
2583 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2584 		if (pipe_ctx->top_pipe) {
2585 			struct pipe_ctx *top = pipe_ctx->top_pipe;
2586 
2587 			while (top->top_pipe)
2588 				top = top->top_pipe; // Traverse to top pipe_ctx
2589 			if (top->plane_state && top->plane_state->layer_index == 0 && !top->plane_state->global_alpha)
2590 				// Global alpha used by top plane for PIP overlay
2591 				// Pre-multiplied/per-pixel alpha used by MPO
2592 				// Check top plane's global alpha to ensure layer_index > 0 not caused by PIP
2593 				return true; // MPO in use and front plane not hidden
2594 		}
2595 	}
2596 	return false;
2597 }
2598 
/*
 * Program the OCSC with the RGB bias terms forced to zero to fix MPO
 * brightness on the rear plane, then restore the caller's matrix.
 * matrix[3], matrix[7] and matrix[11] all carry the same bias value.
 */
static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
{
	const uint16_t rgb_bias = matrix[3];

	matrix[3] = matrix[7] = matrix[11] = 0;
	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
	matrix[3] = matrix[7] = matrix[11] = rgb_bias;
}
2612 
/*
 * Program the output CSC for a pipe. With no stream-level adjustment
 * enabled, fall back to the colorspace default matrix.
 */
void dcn10_program_output_csc(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum dc_color_space colorspace,
		uint16_t *matrix,
		int opp_id)
{
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	int16_t rgb_bias;

	if (!pipe_ctx->stream->csc_color_matrix.enable_adjustment) {
		if (dpp->funcs->dpp_set_csc_default != NULL)
			dpp->funcs->dpp_set_csc_default(dpp, colorspace);
		return;
	}

	if (dpp->funcs->dpp_set_csc_adjustment == NULL)
		return;

	/* MPO is broken with RGB colorspaces when OCSC matrix
	 * brightness offset >= 0 on DCN1 due to OCSC before MPC
	 * Blending adds offsets from front + rear to rear plane
	 *
	 * Fix is to set RGB bias to 0 on rear plane, top plane
	 * black value pixels add offset instead of rear + front
	 */
	rgb_bias = matrix[3]; /* matrix[3/7/11] are all the same offset value */

	if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace))
		dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
	else
		dpp->funcs->dpp_set_csc_adjustment(dpp, matrix);
}
2644 
dcn10_update_dpp(struct dpp * dpp,struct dc_plane_state * plane_state)2645 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2646 {
2647 	struct dc_bias_and_scale bns_params = {0};
2648 
2649 	// program the input csc
2650 	dpp->funcs->dpp_setup(dpp,
2651 			plane_state->format,
2652 			EXPANSION_MODE_ZERO,
2653 			plane_state->input_csc_color_matrix,
2654 			plane_state->color_space,
2655 			NULL);
2656 
2657 	//set scale and bias registers
2658 	build_prescale_params(&bns_params, plane_state);
2659 	if (dpp->funcs->dpp_program_bias_and_scale)
2660 		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2661 }
2662 
dcn10_update_visual_confirm_color(struct dc * dc,struct pipe_ctx * pipe_ctx,int mpcc_id)2663 void dcn10_update_visual_confirm_color(struct dc *dc,
2664 		struct pipe_ctx *pipe_ctx,
2665 		int mpcc_id)
2666 {
2667 	struct mpc *mpc = dc->res_pool->mpc;
2668 
2669 	if (mpc->funcs->set_bg_color) {
2670 		memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color));
2671 		mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id);
2672 	}
2673 }
2674 
/*
 * Build the MPCC blending configuration for a pipe and (re)insert the
 * plane into the MPC tree. For non-full updates only the blending
 * registers are touched; full updates remove and re-insert the MPCC.
 */
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = {0};
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	if (per_pixel_alpha) {
		/* DCN1.0 has output CM before MPC which seems to screw with
		 * pre-multiplied alpha.
		 */
		blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
				pipe_ctx->stream->output_color_space)
						&& pipe_ctx->plane_state->pre_multiplied_alpha);
		/* Combine per-pixel alpha with a global gain when the plane
		 * also requests global alpha.
		 */
		if (pipe_ctx->plane_state->global_alpha) {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
		} else {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
		}
	} else {
		blnd_cfg.pre_multiplied_alpha = false;
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
	}

	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This cause us to pick mpcc1 here,
	 * which causes a pstate hang for yet unknown reason.
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, don't need to touch MPC tree*/
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);
	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);

	ASSERT(new_mpcc != NULL);
	/* Record the OPP/MPCC this HUBP is now routed through. */
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}
2752 
update_scaler(struct pipe_ctx * pipe_ctx)2753 static void update_scaler(struct pipe_ctx *pipe_ctx)
2754 {
2755 	bool per_pixel_alpha =
2756 			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2757 
2758 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2759 	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2760 	/* scaler configuration */
2761 	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2762 			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2763 }
2764 
/*
 * Program the HUBP and DPP for a pipe according to the plane's update
 * flags: DPP clock dividers, DLG/TTU/RQ registers, input CSC, MPCC
 * blending, scaler, viewport, cursor, gamut remap, output CSC, surface
 * config, and finally the surface address. Ordering is significant.
 */
static void dcn10_update_dchubp_dpp(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct plane_size size = plane_state->plane_size;
	unsigned int compat_level = 0;
	bool should_divided_by_2 = false;

	/* depends on DML calculation, DPP clock value may change dynamically */
	/* If request max dpp clk is lower than current dispclk, no need to
	 * divided by 2
	 */
	if (plane_state->update_flags.bits.full_update) {

		/* new calculated dispclk, dppclk are stored in
		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
		 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
		 * dcn10_validate_bandwidth compute new dispclk, dppclk.
		 * dispclk will put in use after optimize_bandwidth when
		 * ramp_up_dispclk_with_dpp is called.
		 * there are two places for dppclk be put in use. One location
		 * is the same as the location as dispclk. Another is within
		 * update_dchubp_dpp which happens between pre_bandwidth and
		 * optimize_bandwidth.
		 * dppclk updated within update_dchubp_dpp will cause new
		 * clock values of dispclk and dppclk not be in use at the same
		 * time. when clocks are decreased, this may cause dppclk is
		 * lower than previous configuration and let pipe stuck.
		 * for example, eDP + external dp,  change resolution of DP from
		 * 1920x1080x144hz to 1280x960x60hz.
		 * before change: dispclk = 337889 dppclk = 337889
		 * change mode, dcn10_validate_bandwidth calculate
		 *                dispclk = 143122 dppclk = 143122
		 * update_dchubp_dpp be executed before dispclk be updated,
		 * dispclk = 337889, but dppclk use new value dispclk /2 =
		 * 168944. this will cause pipe pstate warning issue.
		 * solution: between pre_bandwidth and optimize_bandwidth, while
		 * dispclk is going to be decreased, keep dppclk = dispclk
		 **/
		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
				dc->clk_mgr->clks.dispclk_khz)
			should_divided_by_2 = false;
		else
			should_divided_by_2 =
					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
					dc->clk_mgr->clks.dispclk_khz / 2;

		dpp->funcs->dpp_dppclk_control(
				dpp,
				should_divided_by_2,
				true);

		if (dc->res_pool->dccg)
			dc->res_pool->dccg->funcs->update_dpp_dto(
					dc->res_pool->dccg,
					dpp->inst,
					pipe_ctx->plane_res.bw.dppclk_khz);
		else
			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
						dc->clk_mgr->clks.dispclk_khz / 2 :
							dc->clk_mgr->clks.dispclk_khz;
	}

	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
	 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
	 */
	if (plane_state->update_flags.bits.full_update) {
		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);

		hubp->funcs->hubp_setup(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs,
			&pipe_ctx->rq_regs,
			&pipe_ctx->pipe_dlg_param);
		hubp->funcs->hubp_setup_interdependent(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs);
	}

	/* Surface config is programmed with the scaler viewport as the
	 * surface size.
	 */
	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.bpp_change)
		dcn10_update_dpp(dpp, plane_state);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change)
		hws->funcs.update_mpcc(dc, pipe_ctx);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		update_scaler(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		hubp->funcs->mem_program_viewport(
			hubp,
			&pipe_ctx->plane_res.scl_data.viewport,
			&pipe_ctx->plane_res.scl_data.viewport_c);
	}

	/* Reprogram the cursor whenever the stream has one set. */
	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
		dc->hwss.set_cursor_attribute(pipe_ctx);
		dc->hwss.set_cursor_position(pipe_ctx);

		if (dc->hwss.set_cursor_sdr_white_level)
			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update) {
		/*gamut remap*/
		dc->hwss.program_gamut_remap(pipe_ctx);

		dc->hwss.program_output_csc(dc,
				pipe_ctx,
				pipe_ctx->stream->output_color_space,
				pipe_ctx->stream->csc_color_matrix.matrix,
				pipe_ctx->stream_res.opp->inst);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.pixel_format_change ||
		plane_state->update_flags.bits.horizontal_mirror_change ||
		plane_state->update_flags.bits.rotation_change ||
		plane_state->update_flags.bits.swizzle_change ||
		plane_state->update_flags.bits.dcc_change ||
		plane_state->update_flags.bits.bpp_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.plane_size_change) {
		hubp->funcs->hubp_program_surface_config(
			hubp,
			plane_state->format,
			&plane_state->tiling_info,
			&size,
			plane_state->rotation,
			&plane_state->dcc,
			plane_state->horizontal_mirror,
			compat_level);
	}

	hubp->power_gated = false;

	/* Flip to the new surface address last, after all config is in. */
	dc->hwss.update_plane_addr(dc, pipe_ctx);

	if (is_pipe_tree_visible(pipe_ctx))
		hubp->funcs->set_blank(hubp, false);
}
2926 
/*
 * Blank or unblank the OTG for a pipe. The blank color is programmed
 * first; ABM is restored after unblanking and disabled before blanking,
 * and blanking waits for VBLANK before taking effect.
 */
void dcn10_blank_pixel_data(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool blank)
{
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	struct stream_resource *stream_res = &pipe_ctx->stream_res;
	struct dc_stream_state *stream = pipe_ctx->stream;

	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;


	if (stream_res->tg->funcs->set_blank_color)
		stream_res->tg->funcs->set_blank_color(
				stream_res->tg,
				&black_color);

	if (!blank) {
		/* Unblank first, then re-enable ABM at the stream's level. */
		if (stream_res->tg->funcs->set_blank)
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		if (stream_res->abm) {
			dc->hwss.set_pipe(pipe_ctx);
			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
		}
	} else {
		/* Disable ABM immediately, then blank once in VBLANK. */
		dc->hwss.set_abm_immediate_disable(pipe_ctx);
		if (stream_res->tg->funcs->set_blank) {
			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		}
	}
}
2970 
dcn10_set_hdr_multiplier(struct pipe_ctx * pipe_ctx)2971 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2972 {
2973 	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2974 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2975 	struct custom_float_format fmt;
2976 
2977 	fmt.exponenta_bits = 6;
2978 	fmt.mantissa_bits = 12;
2979 	fmt.sign = true;
2980 
2981 
2982 	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2983 		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2984 
2985 	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2986 			pipe_ctx->plane_res.dpp, hw_mult);
2987 }
2988 
/*
 * Full per-pipe programming sequence: global sync/VTG on the top pipe,
 * plane enable on full updates, HUBP/DPP programming, HDR multiplier
 * and transfer functions.
 *
 * NOTE(review): pipe_ctx->plane_state is dereferenced unconditionally
 * below, so callers must guarantee a plane is attached — confirm
 * against the call sites.
 */
void dcn10_program_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (pipe_ctx->top_pipe == NULL) {
		bool blank = !is_pipe_tree_visible(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				calculate_vready_offset_for_group(pipe_ctx),
				pipe_ctx->pipe_dlg_param.vstartup_start,
				pipe_ctx->pipe_dlg_param.vupdate_offset,
				pipe_ctx->pipe_dlg_param.vupdate_width,
				pipe_ctx->pipe_dlg_param.pstate_keepout);

		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

		if (hws->funcs.setup_vupdate_interrupt)
			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);

		/* Blank when nothing in this tree is visible. */
		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
	}

	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		dcn10_enable_plane(dc, pipe_ctx, context);

	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);

	hws->funcs.set_hdr_multiplier(pipe_ctx);

	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for full update.
	 * TODO: This can be further optimized/cleaned up
	 * Always call this for now since it does memcmp inside before
	 * doing heavy calculation and programming
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}
3037 
dcn10_wait_for_pending_cleared(struct dc * dc,struct dc_state * context)3038 void dcn10_wait_for_pending_cleared(struct dc *dc,
3039 		struct dc_state *context)
3040 {
3041 		struct pipe_ctx *pipe_ctx;
3042 		struct timing_generator *tg;
3043 		int i;
3044 
3045 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
3046 			pipe_ctx = &context->res_ctx.pipe_ctx[i];
3047 			tg = pipe_ctx->stream_res.tg;
3048 
3049 			/*
3050 			 * Only wait for top pipe's tg penindg bit
3051 			 * Also skip if pipe is disabled.
3052 			 */
3053 			if (pipe_ctx->top_pipe ||
3054 			    !pipe_ctx->stream || !pipe_ctx->plane_state ||
3055 			    !tg->funcs->is_tg_enabled(tg))
3056 				continue;
3057 
3058 			/*
3059 			 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
3060 			 * For some reason waiting for OTG_UPDATE_PENDING cleared
3061 			 * seems to not trigger the update right away, and if we
3062 			 * lock again before VUPDATE then we don't get a separated
3063 			 * operation.
3064 			 */
3065 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
3066 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
3067 		}
3068 }
3069 
/*
 * Front-end steps that must run after pipe locks are released: the
 * false-underflow workaround for plane-less streams, disabling of
 * pipes marked for removal, a single bandwidth re-optimization if any
 * pipe was disabled, and the DEGVIDCN10_254 watermark workaround.
 */
void dcn10_post_unlock_program_front_end(
		struct dc *dc,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			pipe_ctx->stream) {
			struct timing_generator *tg = pipe_ctx->stream_res.tg;

			/* Streams with no planes need the underflow WA. */
			if (context->stream_status[i].plane_count == 0)
				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
			dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);

	/* One optimize_bandwidth call suffices even if several pipes were
	 * disabled, hence the break.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
			dc->hwss.optimize_bandwidth(dc, context);
			break;
		}

	if (dc->hwseq->wa.DEGVIDCN10_254)
		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}
3102 
dcn10_stereo_hw_frame_pack_wa(struct dc * dc,struct dc_state * context)3103 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3104 {
3105 	uint8_t i;
3106 
3107 	for (i = 0; i < context->stream_count; i++) {
3108 		if (context->streams[i]->timing.timing_3d_format
3109 				== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3110 			/*
3111 			 * Disable stutter
3112 			 */
3113 			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
3114 			break;
3115 		}
3116 	}
3117 }
3118 
/*
 * Update clocks and watermarks ahead of a mode/surface change.
 * update_clocks() is passed false here (presumably safe_to_lower —
 * confirm against the clk_mgr interface), while the post-change path
 * dcn10_optimize_bandwidth() passes true.
 */
void dcn10_prepare_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	/* No streams means no PHY clock is needed. */
	if (context->stream_count == 0)
		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			false);

	/* Program watermarks and record whether a later optimization pass
	 * is still required.
	 */
	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);
	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3156 
/*
 * Re-program clocks and watermarks after a change has taken effect.
 * Mirrors dcn10_prepare_bandwidth() but calls update_clocks() with
 * true (presumably safe_to_lower — confirm against the clk_mgr
 * interface), allowing clocks to drop to the new requirement.
 */
void dcn10_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	/* No streams means no PHY clock is needed. */
	if (context->stream_count == 0)
		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			true);

	hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);

	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3195 
dcn10_set_drr(struct pipe_ctx ** pipe_ctx,int num_pipes,struct dc_crtc_timing_adjust adjust)3196 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3197 		int num_pipes, struct dc_crtc_timing_adjust adjust)
3198 {
3199 	int i = 0;
3200 	struct drr_params params = {0};
3201 	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3202 	unsigned int event_triggers = 0x800;
3203 	// Note DRR trigger events are generated regardless of whether num frames met.
3204 	unsigned int num_frames = 2;
3205 
3206 	params.vertical_total_max = adjust.v_total_max;
3207 	params.vertical_total_min = adjust.v_total_min;
3208 	params.vertical_total_mid = adjust.v_total_mid;
3209 	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
3210 	/* TODO: If multiple pipes are to be supported, you need
3211 	 * some GSL stuff. Static screen triggers may be programmed differently
3212 	 * as well.
3213 	 */
3214 	for (i = 0; i < num_pipes; i++) {
3215 		/* dc_state_destruct() might null the stream resources, so fetch tg
3216 		 * here first to avoid a race condition. The lifetime of the pointee
3217 		 * itself (the timing_generator object) is not a problem here.
3218 		 */
3219 		struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
3220 
3221 		if ((tg != NULL) && tg->funcs) {
3222 			if (tg->funcs->set_drr)
3223 				tg->funcs->set_drr(tg, &params);
3224 			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
3225 				if (tg->funcs->set_static_screen_control)
3226 					tg->funcs->set_static_screen_control(
3227 						tg, event_triggers, num_frames);
3228 		}
3229 	}
3230 }
3231 
dcn10_get_position(struct pipe_ctx ** pipe_ctx,int num_pipes,struct crtc_position * position)3232 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3233 		int num_pipes,
3234 		struct crtc_position *position)
3235 {
3236 	int i = 0;
3237 
3238 	/* TODO: handle pipes > 1
3239 	 */
3240 	for (i = 0; i < num_pipes; i++)
3241 		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3242 }
3243 
dcn10_set_static_screen_control(struct pipe_ctx ** pipe_ctx,int num_pipes,const struct dc_static_screen_params * params)3244 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3245 		int num_pipes, const struct dc_static_screen_params *params)
3246 {
3247 	unsigned int i;
3248 	unsigned int triggers = 0;
3249 
3250 	if (params->triggers.surface_update)
3251 		triggers |= 0x80;
3252 	if (params->triggers.cursor_update)
3253 		triggers |= 0x2;
3254 	if (params->triggers.force_trigger)
3255 		triggers |= 0x1;
3256 
3257 	for (i = 0; i < num_pipes; i++)
3258 		pipe_ctx[i]->stream_res.tg->funcs->
3259 			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3260 					triggers, params->num_frames);
3261 }
3262 
dcn10_config_stereo_parameters(struct dc_stream_state * stream,struct crtc_stereo_flags * flags)3263 static void dcn10_config_stereo_parameters(
3264 		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3265 {
3266 	enum view_3d_format view_format = stream->view_format;
3267 	enum dc_timing_3d_format timing_3d_format =\
3268 			stream->timing.timing_3d_format;
3269 	bool non_stereo_timing = false;
3270 
3271 	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3272 		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3273 		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3274 		non_stereo_timing = true;
3275 
3276 	if (non_stereo_timing == false &&
3277 		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3278 
3279 		flags->PROGRAM_STEREO         = 1;
3280 		flags->PROGRAM_POLARITY       = 1;
3281 		if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3282 			timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3283 			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3284 			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3285 
3286 			if (stream->link && stream->link->ddc) {
3287 				enum display_dongle_type dongle = \
3288 						stream->link->ddc->dongle_type;
3289 
3290 				if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3291 					dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3292 					dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3293 					flags->DISABLE_STEREO_DP_SYNC = 1;
3294 			}
3295 		}
3296 		flags->RIGHT_EYE_POLARITY =\
3297 				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3298 		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3299 			flags->FRAME_PACKED = 1;
3300 	}
3301 
3302 	return;
3303 }
3304 
dcn10_setup_stereo(struct pipe_ctx * pipe_ctx,struct dc * dc)3305 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3306 {
3307 	struct crtc_stereo_flags flags = { 0 };
3308 	struct dc_stream_state *stream = pipe_ctx->stream;
3309 
3310 	dcn10_config_stereo_parameters(stream, &flags);
3311 
3312 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3313 		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3314 			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3315 	} else {
3316 		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3317 	}
3318 
3319 	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3320 		pipe_ctx->stream_res.opp,
3321 		flags.PROGRAM_STEREO == 1,
3322 		&stream->timing);
3323 
3324 	pipe_ctx->stream_res.tg->funcs->program_stereo(
3325 		pipe_ctx->stream_res.tg,
3326 		&stream->timing,
3327 		&flags);
3328 
3329 	return;
3330 }
3331 
get_hubp_by_inst(struct resource_pool * res_pool,int mpcc_inst)3332 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3333 {
3334 	int i;
3335 
3336 	for (i = 0; i < res_pool->pipe_count; i++) {
3337 		if (res_pool->hubps[i]->inst == mpcc_inst)
3338 			return res_pool->hubps[i];
3339 	}
3340 	ASSERT(false);
3341 	return NULL;
3342 }
3343 
dcn10_wait_for_mpcc_disconnect(struct dc * dc,struct resource_pool * res_pool,struct pipe_ctx * pipe_ctx)3344 void dcn10_wait_for_mpcc_disconnect(
3345 		struct dc *dc,
3346 		struct resource_pool *res_pool,
3347 		struct pipe_ctx *pipe_ctx)
3348 {
3349 	struct dce_hwseq *hws = dc->hwseq;
3350 	int mpcc_inst;
3351 
3352 	if (dc->debug.sanity_checks) {
3353 		hws->funcs.verify_allow_pstate_change_high(dc);
3354 	}
3355 
3356 	if (!pipe_ctx->stream_res.opp)
3357 		return;
3358 
3359 	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3360 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3361 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3362 
3363 			if (pipe_ctx->stream_res.tg &&
3364 				pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
3365 				res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3366 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3367 			hubp->funcs->set_blank(hubp, true);
3368 		}
3369 	}
3370 
3371 	if (dc->debug.sanity_checks) {
3372 		hws->funcs.verify_allow_pstate_change_high(dc);
3373 	}
3374 
3375 }
3376 
/*
 * No-op display power gating hook for DCN10.
 *
 * DCN does not gate pipes through this DCE-era interface, so the hook
 * simply reports success; all parameters are ignored.
 */
bool dcn10_dummy_display_power_gating(
	struct dc *dc,
	uint8_t controller_id,
	struct dc_bios *dcb,
	enum pipe_gating_control power_gating)
{
	/* Nothing to gate here; pretend the request succeeded. */
	return true;
}
3385 
dcn10_update_pending_status(struct pipe_ctx * pipe_ctx)3386 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3387 {
3388 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3389 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3390 	bool flip_pending;
3391 	struct dc *dc = pipe_ctx->stream->ctx->dc;
3392 
3393 	if (plane_state == NULL)
3394 		return;
3395 
3396 	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3397 					pipe_ctx->plane_res.hubp);
3398 
3399 	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3400 
3401 	if (!flip_pending)
3402 		plane_state->status.current_address = plane_state->status.requested_address;
3403 
3404 	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3405 			tg->funcs->is_stereo_left_eye) {
3406 		plane_state->status.is_right_eye =
3407 				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3408 	}
3409 
3410 	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3411 		struct dce_hwseq *hwseq = dc->hwseq;
3412 		struct timing_generator *tg = dc->res_pool->timing_generators[0];
3413 		unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3414 
3415 		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3416 			struct hubbub *hubbub = dc->res_pool->hubbub;
3417 
3418 			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3419 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3420 		}
3421 	}
3422 }
3423 
dcn10_update_dchub(struct dce_hwseq * hws,struct dchub_init_data * dh_data)3424 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3425 {
3426 	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3427 
3428 	/* In DCN, this programming sequence is owned by the hubbub */
3429 	hubbub->funcs->update_dchub(hubbub, dh_data);
3430 }
3431 
dcn10_can_pipe_disable_cursor(struct pipe_ctx * pipe_ctx)3432 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3433 {
3434 	struct pipe_ctx *test_pipe, *split_pipe;
3435 	const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
3436 	struct rect r1 = scl_data->recout, r2, r2_half;
3437 	int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
3438 	int cur_layer = pipe_ctx->plane_state->layer_index;
3439 
3440 	/**
3441 	 * Disable the cursor if there's another pipe above this with a
3442 	 * plane that contains this pipe's viewport to prevent double cursor
3443 	 * and incorrect scaling artifacts.
3444 	 */
3445 	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3446 	     test_pipe = test_pipe->top_pipe) {
3447 		// Skip invisible layer and pipe-split plane on same layer
3448 		if (!test_pipe->plane_state ||
3449 		    !test_pipe->plane_state->visible ||
3450 		    test_pipe->plane_state->layer_index == cur_layer)
3451 			continue;
3452 
3453 		r2 = test_pipe->plane_res.scl_data.recout;
3454 		r2_r = r2.x + r2.width;
3455 		r2_b = r2.y + r2.height;
3456 		split_pipe = test_pipe;
3457 
3458 		/**
3459 		 * There is another half plane on same layer because of
3460 		 * pipe-split, merge together per same height.
3461 		 */
3462 		for (split_pipe = pipe_ctx->top_pipe; split_pipe;
3463 		     split_pipe = split_pipe->top_pipe)
3464 			if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
3465 				r2_half = split_pipe->plane_res.scl_data.recout;
3466 				r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
3467 				r2.width = r2.width + r2_half.width;
3468 				r2_r = r2.x + r2.width;
3469 				break;
3470 			}
3471 
3472 		if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
3473 			return true;
3474 	}
3475 
3476 	return false;
3477 }
3478 
/*
 * Program the HW cursor position for this pipe.
 *
 * Translates the stream-space cursor position into plane space (accounting
 * for plane scaling, source-rect clipping, rotation and mirroring, plus the
 * pipe-split / ODM-combine layouts), then hands the adjusted position to
 * both the HUBP and the DPP cursor hardware.
 */
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror,
		.stream = pipe_ctx->stream,
	};
	bool pipe_split_on = false;
	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
		(pipe_ctx->prev_odm_pipe != NULL);

	int x_plane = pipe_ctx->plane_state->dst_rect.x;
	int y_plane = pipe_ctx->plane_state->dst_rect.y;
	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;

	/* A pipe neighbour whose viewport is smaller than the plane source
	 * rect indicates the plane is split across pipes (MPC split).
	 */
	if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
		if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
			(pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
			pipe_split_on = true;
		}
	}

	/**
	 * DC cursor is stream space, HW cursor is plane space and drawn
	 * as part of the framebuffer.
	 *
	 * Cursor position can't be negative, but hotspot can be used to
	 * shift cursor out of the plane bounds. Hotspot must be smaller
	 * than the cursor size.
	 */

	/**
	 * Translate cursor from stream space to plane space.
	 *
	 * If the cursor is scaled then we need to scale the position
	 * to be in the approximately correct place. We can't do anything
	 * about the actual size being incorrect, that's a limitation of
	 * the hardware.
	 */
	if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
		/* 90/270: axes are swapped, so width scales y and height scales x. */
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.height;
	} else {
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.height;
	}

	/**
	 * If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
	 *
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 *
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	 */
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;
	}

	/**
	 * If the position is negative then we need to add to the hotspot
	 * to shift the cursor outside the plane.
	 */

	if (x_pos < 0) {
		pos_cpy.x_hotspot -= x_pos;
		x_pos = 0;
	}

	if (y_pos < 0) {
		pos_cpy.y_hotspot -= y_pos;
		y_pos = 0;
	}

	/* x_pos/y_pos are clamped non-negative above, so the cast is safe. */
	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	/* Video-progressive surfaces do not support the HW cursor here. */
	if (pipe_ctx->plane_state->address.type
			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
		pos_cpy.enable = false;

	/* A fully covering higher layer would double-draw the cursor. */
	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;


	if (param.rotation == ROTATION_ANGLE_0) {
		int viewport_width =
			pipe_ctx->plane_res.scl_data.viewport.width;
		int viewport_x =
			pipe_ctx->plane_res.scl_data.viewport.x;

		if (param.mirror) {
			if (pipe_split_on || odm_combine_on) {
				if (pos_cpy.x >= viewport_width + viewport_x) {
					/* Cursor is on the right half: reflect within it. */
					pos_cpy.x = 2 * viewport_width
							- pos_cpy.x + 2 * viewport_x;
				} else {
					uint32_t temp_x = pos_cpy.x;

					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
					if (temp_x >= viewport_x +
						(int)hubp->curs_attr.width || pos_cpy.x
						<= (int)hubp->curs_attr.width +
						pipe_ctx->plane_state->src_rect.x) {
						pos_cpy.x = 2 * viewport_width - temp_x;
					}
				}
			} else {
				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
			}
		}
	}
	// Swap axis and mirror horizontally
	else if (param.rotation == ROTATION_ANGLE_90) {
		uint32_t temp_x = pos_cpy.x;

		/* NOTE(review): the y term is offset by viewport.x, not
		 * viewport.y — looks intentional for the swapped axes but
		 * worth confirming against the 90-degree-rotation use case.
		 */
		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
		pos_cpy.y = temp_x;
	}
	// Swap axis and mirror vertically
	else if (param.rotation == ROTATION_ANGLE_270) {
		uint32_t temp_y = pos_cpy.y;
		int viewport_height =
			pipe_ctx->plane_res.scl_data.viewport.height;
		int viewport_y =
			pipe_ctx->plane_res.scl_data.viewport.y;

		/**
		 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
		 * For pipe split cases:
		 * - apply offset of viewport.y to normalize pos_cpy.x
		 * - calculate the pos_cpy.y as before
		 * - shift pos_cpy.y back by same offset to get final value
		 * - since we iterate through both pipes, use the lower
		 *   viewport.y for offset
		 * For non pipe split cases, use the same calculation for
		 *  pos_cpy.y as the 180 degree rotation case below,
		 *  but use pos_cpy.x as our input because we are rotating
		 *  270 degrees
		 */
		if (pipe_split_on || odm_combine_on) {
			int pos_cpy_x_offset;
			int other_pipe_viewport_y;

			/* Find the sibling pipe's viewport.y: bottom/top pipe
			 * for MPC split, next/prev ODM pipe for ODM combine.
			 */
			if (pipe_split_on) {
				if (pipe_ctx->bottom_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
				}
			} else {
				if (pipe_ctx->next_odm_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
				}
			}
			/* Normalize with the smaller of the two viewport.y values. */
			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
				other_pipe_viewport_y : viewport_y;
			pos_cpy.x -= pos_cpy_x_offset;
			if (pos_cpy.x > viewport_height) {
				pos_cpy.x = pos_cpy.x - viewport_height;
				pos_cpy.y = viewport_height - pos_cpy.x;
			} else {
				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
			}
			pos_cpy.y += pos_cpy_x_offset;
		} else {
			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
		}
		pos_cpy.x = temp_y;
	}
	// Mirror horizontally and vertically
	else if (param.rotation == ROTATION_ANGLE_180) {
		int viewport_width =
			pipe_ctx->plane_res.scl_data.viewport.width;
		int viewport_x =
			pipe_ctx->plane_res.scl_data.viewport.x;

		if (!param.mirror) {
			if (pipe_split_on || odm_combine_on) {
				if (pos_cpy.x >= viewport_width + viewport_x) {
					pos_cpy.x = 2 * viewport_width
							- pos_cpy.x + 2 * viewport_x;
				} else {
					uint32_t temp_x = pos_cpy.x;

					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
					if (temp_x >= viewport_x +
						(int)hubp->curs_attr.width || pos_cpy.x
						<= (int)hubp->curs_attr.width +
						pipe_ctx->plane_state->src_rect.x) {
						pos_cpy.x = temp_x + viewport_width;
					}
				}
			} else {
				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
			}
		}

		/**
		 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
		 * Calculation:
		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
		 * Simplify it as:
		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
		 */
		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
	}

	/* Both the HUBP and the DPP must see the same final position. */
	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
3717 
dcn10_set_cursor_attribute(struct pipe_ctx * pipe_ctx)3718 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3719 {
3720 	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3721 
3722 	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3723 			pipe_ctx->plane_res.hubp, attributes);
3724 	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3725 		pipe_ctx->plane_res.dpp, attributes);
3726 }
3727 
dcn10_set_cursor_sdr_white_level(struct pipe_ctx * pipe_ctx)3728 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3729 {
3730 	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3731 	struct fixed31_32 multiplier;
3732 	struct dpp_cursor_attributes opt_attr = { 0 };
3733 	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3734 	struct custom_float_format fmt;
3735 
3736 	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3737 		return;
3738 
3739 	fmt.exponenta_bits = 5;
3740 	fmt.mantissa_bits = 10;
3741 	fmt.sign = true;
3742 
3743 	if (sdr_white_level > 80) {
3744 		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3745 		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3746 	}
3747 
3748 	opt_attr.scale = hw_scale;
3749 	opt_attr.bias = 0;
3750 
3751 	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3752 			pipe_ctx->plane_res.dpp, &opt_attr);
3753 }
3754 
3755 /*
3756  * apply_front_porch_workaround  TODO FPGA still need?
3757  *
3758  * This is a workaround for a bug that has existed since R5xx and has not been
3759  * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3760  */
apply_front_porch_workaround(struct dc_crtc_timing * timing)3761 static void apply_front_porch_workaround(
3762 	struct dc_crtc_timing *timing)
3763 {
3764 	if (timing->flags.INTERLACE == 1) {
3765 		if (timing->v_front_porch < 2)
3766 			timing->v_front_porch = 2;
3767 	} else {
3768 		if (timing->v_front_porch < 1)
3769 			timing->v_front_porch = 1;
3770 	}
3771 }
3772 
dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx * pipe_ctx)3773 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3774 {
3775 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3776 	struct dc_crtc_timing patched_crtc_timing;
3777 	int vesa_sync_start;
3778 	int asic_blank_end;
3779 	int interlace_factor;
3780 
3781 	patched_crtc_timing = *dc_crtc_timing;
3782 	apply_front_porch_workaround(&patched_crtc_timing);
3783 
3784 	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3785 
3786 	vesa_sync_start = patched_crtc_timing.v_addressable +
3787 			patched_crtc_timing.v_border_bottom +
3788 			patched_crtc_timing.v_front_porch;
3789 
3790 	asic_blank_end = (patched_crtc_timing.v_total -
3791 			vesa_sync_start -
3792 			patched_crtc_timing.v_border_top)
3793 			* interlace_factor;
3794 
3795 	return asic_blank_end -
3796 			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3797 }
3798 
dcn10_calc_vupdate_position(struct dc * dc,struct pipe_ctx * pipe_ctx,uint32_t * start_line,uint32_t * end_line)3799 void dcn10_calc_vupdate_position(
3800 		struct dc *dc,
3801 		struct pipe_ctx *pipe_ctx,
3802 		uint32_t *start_line,
3803 		uint32_t *end_line)
3804 {
3805 	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
3806 	int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3807 
3808 	if (vupdate_pos >= 0)
3809 		*start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
3810 	else
3811 		*start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
3812 	*end_line = (*start_line + 2) % timing->v_total;
3813 }
3814 
dcn10_cal_vline_position(struct dc * dc,struct pipe_ctx * pipe_ctx,uint32_t * start_line,uint32_t * end_line)3815 static void dcn10_cal_vline_position(
3816 		struct dc *dc,
3817 		struct pipe_ctx *pipe_ctx,
3818 		uint32_t *start_line,
3819 		uint32_t *end_line)
3820 {
3821 	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
3822 	int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;
3823 
3824 	if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
3825 		if (vline_pos > 0)
3826 			vline_pos--;
3827 		else if (vline_pos < 0)
3828 			vline_pos++;
3829 
3830 		vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3831 		if (vline_pos >= 0)
3832 			*start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
3833 		else
3834 			*start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
3835 		*end_line = (*start_line + 2) % timing->v_total;
3836 	} else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
3837 		// vsync is line 0 so start_line is just the requested line offset
3838 		*start_line = vline_pos;
3839 		*end_line = (*start_line + 2) % timing->v_total;
3840 	} else
3841 		ASSERT(0);
3842 }
3843 
dcn10_setup_periodic_interrupt(struct dc * dc,struct pipe_ctx * pipe_ctx)3844 void dcn10_setup_periodic_interrupt(
3845 		struct dc *dc,
3846 		struct pipe_ctx *pipe_ctx)
3847 {
3848 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3849 	uint32_t start_line = 0;
3850 	uint32_t end_line = 0;
3851 
3852 	dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
3853 
3854 	tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3855 }
3856 
dcn10_setup_vupdate_interrupt(struct dc * dc,struct pipe_ctx * pipe_ctx)3857 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3858 {
3859 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3860 	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3861 
3862 	if (start_line < 0) {
3863 		ASSERT(0);
3864 		start_line = 0;
3865 	}
3866 
3867 	if (tg->funcs->setup_vertical_interrupt2)
3868 		tg->funcs->setup_vertical_interrupt2(tg, start_line);
3869 }
3870 
dcn10_unblank_stream(struct pipe_ctx * pipe_ctx,struct dc_link_settings * link_settings)3871 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3872 		struct dc_link_settings *link_settings)
3873 {
3874 	struct encoder_unblank_param params = {0};
3875 	struct dc_stream_state *stream = pipe_ctx->stream;
3876 	struct dc_link *link = stream->link;
3877 	struct dce_hwseq *hws = link->dc->hwseq;
3878 
3879 	/* only 3 items below are used by unblank */
3880 	params.timing = pipe_ctx->stream->timing;
3881 
3882 	params.link_settings.link_rate = link_settings->link_rate;
3883 
3884 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3885 		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3886 			params.timing.pix_clk_100hz /= 2;
3887 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
3888 	}
3889 
3890 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3891 		hws->funcs.edp_backlight_control(link, true);
3892 	}
3893 }
3894 
dcn10_send_immediate_sdp_message(struct pipe_ctx * pipe_ctx,const uint8_t * custom_sdp_message,unsigned int sdp_message_size)3895 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3896 				const uint8_t *custom_sdp_message,
3897 				unsigned int sdp_message_size)
3898 {
3899 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3900 		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3901 				pipe_ctx->stream_res.stream_enc,
3902 				custom_sdp_message,
3903 				sdp_message_size);
3904 	}
3905 }
dcn10_set_clock(struct dc * dc,enum dc_clock_type clock_type,uint32_t clk_khz,uint32_t stepping)3906 enum dc_status dcn10_set_clock(struct dc *dc,
3907 			enum dc_clock_type clock_type,
3908 			uint32_t clk_khz,
3909 			uint32_t stepping)
3910 {
3911 	struct dc_state *context = dc->current_state;
3912 	struct dc_clock_config clock_cfg = {0};
3913 	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3914 
3915 	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3916 		return DC_FAIL_UNSUPPORTED_1;
3917 
3918 	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3919 		context, clock_type, &clock_cfg);
3920 
3921 	if (clk_khz > clock_cfg.max_clock_khz)
3922 		return DC_FAIL_CLK_EXCEED_MAX;
3923 
3924 	if (clk_khz < clock_cfg.min_clock_khz)
3925 		return DC_FAIL_CLK_BELOW_MIN;
3926 
3927 	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3928 		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3929 
3930 	/*update internal request clock for update clock use*/
3931 	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3932 		current_clocks->dispclk_khz = clk_khz;
3933 	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3934 		current_clocks->dppclk_khz = clk_khz;
3935 	else
3936 		return DC_ERROR_UNEXPECTED;
3937 
3938 	if (dc->clk_mgr->funcs->update_clocks)
3939 				dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3940 				context, true);
3941 	return DC_OK;
3942 
3943 }
3944 
dcn10_get_clock(struct dc * dc,enum dc_clock_type clock_type,struct dc_clock_config * clock_cfg)3945 void dcn10_get_clock(struct dc *dc,
3946 			enum dc_clock_type clock_type,
3947 			struct dc_clock_config *clock_cfg)
3948 {
3949 	struct dc_state *context = dc->current_state;
3950 
3951 	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3952 				dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3953 
3954 }
3955 
dcn10_get_dcc_en_bits(struct dc * dc,int * dcc_en_bits)3956 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3957 {
3958 	struct resource_pool *pool = dc->res_pool;
3959 	int i;
3960 
3961 	for (i = 0; i < pool->pipe_count; i++) {
3962 		struct hubp *hubp = pool->hubps[i];
3963 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3964 
3965 		hubp->funcs->hubp_read_state(hubp);
3966 
3967 		if (!s->blank_en)
3968 			dcc_en_bits[i] = s->dcc_en ? 1 : 0;
3969 	}
3970 }
3971