// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.


#include "os_types.h"
#include "dm_services.h"
#include "basics/dc_common.h"
#include "dm_helpers.h"
#include "core_types.h"
#include "resource.h"
#include "dccg.h"
#include "dce/dce_hwseq.h"
#include "reg_helper.h"
#include "abm.h"
#include "hubp.h"
#include "dchubbub.h"
#include "timing_generator.h"
#include "opp.h"
#include "ipp.h"
#include "mpc.h"
#include "mcif_wb.h"
#include "dc_dmub_srv.h"
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "clk_mgr.h"
#include "dsc.h"
#include "link_service.h"
#include "custom_float.h"

#include "dce/dmub_hw_lock_mgr.h"
#include "dcn10/dcn10_cm_common.h"
#include "dcn10/dcn10_hubbub.h"
#include "dcn20/dcn20_optc.h"
#include "dcn30/dcn30_cm_common.h"
#include "dcn32/dcn32_hwseq.h"
#include "dcn401_hwseq.h"
#include "dcn401/dcn401_resource.h"
#include "dc_state_priv.h"
#include "link_enc_cfg.h"
#include "../hw_sequencer.h"

#define DC_LOGGER_INIT(logger)

#define CTX \
	hws->ctx
#define REG(reg)\
	hws->regs->reg
#define DC_LOGGER \
	dc->ctx->logger


#undef FN
#define FN(reg_name, field_name) \
	hws->shifts->field_name, hws->masks->field_name

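/* Request the minimum clock levels (entry 0 of the SMU clock table) for the
 * current state. As noted in the body below, DISPCLK cannot always be
 * dropped to its minimum, so it may be kept at the current DENTIST frequency
 * instead.
 */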
void dcn401_initialize_min_clocks(struct dc *dc)
{
	struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk;

	clocks->dcfclk_deep_sleep_khz = DCN3_2_DCFCLK_DS_INIT_KHZ;
	clocks->dcfclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz * 1000;
	clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000;
	clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000;
	clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000;
	if (dc->debug.disable_boot_optimizations) {
		clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000;
	} else {
		/* Even though DPG_EN = 1 for the connected display, it still requires the
		 * correct timing so we cannot set DISPCLK to min freq or it could cause
		 * audio corruption. Read current DISPCLK from DENTIST and request the same
		 * freq to ensure that the timing is valid and unchanged.
		 */
		clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr);
	}
	clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
	clocks->fclk_p_state_change_support = true;
	clocks->p_state_change_support = true;

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			dc->current_state,
			true);
}

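/* Program the three gamut remap blocks for this pipe's MPCC: the first MCM
 * remap from the plane's remap matrix (when enabled), the second MCM remap
 * in bypass for now, and the OGAM remap from the stream's matrix, matching
 * existing DCN3x behavior.
 */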
void dcn401_program_gamut_remap(struct pipe_ctx *pipe_ctx)
{
	unsigned int i = 0;
	struct mpc_grph_gamut_adjustment mpc_adjust;
	unsigned int mpcc_id = pipe_ctx->plane_res.mpcc_inst;
	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;

	// For now, assert if the location is not pre-blend
	if (pipe_ctx->plane_state)
		ASSERT(pipe_ctx->plane_state->mcm_location == MPCC_MOVABLE_CM_LOCATION_BEFORE);

	// program MPCC_MCM_FIRST_GAMUT_REMAP
	memset(&mpc_adjust, 0, sizeof(mpc_adjust));
	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
	mpc_adjust.mpcc_gamut_remap_block_id = MPCC_MCM_FIRST_GAMUT_REMAP;

	if (pipe_ctx->plane_state &&
		pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
		mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
			mpc_adjust.temperature_matrix[i] =
			pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
	}

	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);

	// program MPCC_MCM_SECOND_GAMUT_REMAP for Bypass / Disable for now
	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
	mpc_adjust.mpcc_gamut_remap_block_id = MPCC_MCM_SECOND_GAMUT_REMAP;

	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);

	// program MPCC_OGAM_GAMUT_REMAP the same as is currently used on DCN3x
	memset(&mpc_adjust, 0, sizeof(mpc_adjust));
	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
	mpc_adjust.mpcc_gamut_remap_block_id = MPCC_OGAM_GAMUT_REMAP;

	if (pipe_ctx->top_pipe == NULL) {
		if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
			mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
			for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
				mpc_adjust.temperature_matrix[i] =
				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
		}
	}

	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
}

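/* One-time hardware bring-up for DCN4.01: initialize clocks and the DCCG,
 * power up link encoders, blank all DP displays before detection, power
 * down unused hardware unless seamless boot is requested, then initialize
 * audio, backlight and ABM, enable clock gating, and query DMCUB caps.
 */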
void dcn401_init_hw(struct dc *dc)
{
	struct abm **abms = dc->res_pool->multiple_abms;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	int i;
	int edp_num;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
	int current_dchub_ref_freq = 0;

	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) {
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

		// mark dcmode limits present if any clock has distinct AC and DC values from SMU
		dc->caps.dcmode_power_limits_present = dc->clk_mgr->funcs->is_dc_mode_present &&
				dc->clk_mgr->funcs->is_dc_mode_present(dc->clk_mgr);
	}

	// Initialize the dccg
	if (res_pool->dccg->funcs->dccg_init)
		res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	// Disable DMUB Initialization until IPS state programming is finalized
	//if (!dcb->funcs->is_accelerated_mode(dcb)) {
	//	hws->funcs.bios_golden_init(dc);
	//}

	// Set default OPTC memory power states
	if (dc->debug.enable_mem_low_power.bits.optc) {
		// Shutdown when unassigned and light sleep in VBLANK
		REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
	}

	if (dc->debug.enable_mem_low_power.bits.vga) {
		// Power down VGA memory
		REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
	}

	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->hubbub) {
			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			current_dchub_ref_freq = res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have a DCCG SW component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (link->ep_type != DISPLAY_ENDPOINT_PHY)
			continue;

		link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* Call enable_power_gating_plane before dsc_pg_control because
	 * FORCEON = 1 is the HW default value on boot-up and resume from S3.
	 */
	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		/* Disabling boot optimizations means powering down everything including PHY, DIG,
		 * and OTG (i.e. the boot is not optimized because we do a full power down).
		 */
		if (dc->hwss.enable_accelerated_mode && dc->debug.disable_boot_optimizations)
			dc->hwss.enable_accelerated_mode(dc, dc->current_state);
		else
			hws->funcs.init_pipes(dc, dc->current_state);

		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

		dcn401_initialize_min_clocks(dc);

		/* On HW init, allow idle optimizations after pipes have been turned off.
		 *
		 * In certain D3 cases (i.e. BOCO / BOMACO) it's possible that hardware state
		 * is reset (i.e. not in idle at the time hw init is called), but software state
		 * still has idle_optimizations = true, so we must disable idle optimizations first
		 * (i.e. set false), then re-enable (set true).
		 */
		dc_allow_idle_optimizations(dc, false);
		dc_allow_idle_optimizations(dc, true);
	}

	/* In headless boot cases, DIG may be turned
	 * on which causes HW/SW discrepancies.
	 * To avoid this, power down hardware on boot
	 * if DIG is turned on and seamless boot is not enabled.
	 */
	if (!dc->config.seamless_boot_edp_requested) {
		struct dc_link *edp_links[MAX_NUM_EDP];
		struct dc_link *edp_link;

		dc_get_edp_links(dc, edp_links, &edp_num);
		if (edp_num) {
			for (i = 0; i < edp_num; i++) {
				edp_link = edp_links[i];
				if (edp_link->link_enc->funcs->is_dig_enabled &&
						edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
						dc->hwss.edp_backlight_control &&
						hws->funcs.power_down &&
						dc->hwss.edp_power_control) {
					dc->hwss.edp_backlight_control(edp_link, false);
					hws->funcs.power_down(dc);
					dc->hwss.edp_power_control(edp_link, false);
				}
			}
		} else {
			for (i = 0; i < dc->link_count; i++) {
				struct dc_link *link = dc->links[i];

				if (link->link_enc->funcs->is_dig_enabled &&
						link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
						hws->funcs.power_down) {
					hws->funcs.power_down(dc);
					break;
				}

			}
		}
	}

	for (i = 0; i < res_pool->audio_count; i++) {
		struct audio *audio = res_pool->audios[i];

		audio->funcs->hw_init(audio);
	}

	for (i = 0; i < dc->link_count; i++) {
		struct dc_link *link = dc->links[i];

		if (link->panel_cntl) {
			backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
			user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (abms[i] != NULL && abms[i]->funcs != NULL)
			abms[i]->funcs->abm_init(abms[i], backlight, user_level);
	}

	/* Power AFMT HDMI memory. TODO: may move to disable/enable output to save power */
	REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	dcn401_setup_hpo_hw_control(hws, true);

	if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
		dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);

	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);

	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
		dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, false, false);

	if (dc->res_pool->hubbub->funcs->init_crb)
		dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);

	if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
		dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc);

	// Get DMCUB capabilities
	if (dc->ctx->dmub_srv) {
		dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
		dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
		dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0;
		dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
		dc->debug.fams2_config.bits.enable &=
				dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver; // SW and FW FAMS versions must match for support
		if ((!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box)
			|| res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) {
			/* update bounding box if FAMS2 disabled, or if dchub clk has changed */
			if (dc->clk_mgr)
				dc->res_pool->funcs->update_bw_bounding_box(dc,
									    dc->clk_mgr->bw_params);
		}
	}
}

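/* Derive the enable/disable (xable) state of the MCM shaper, 3DLUT and
 * 1D LUT from the plane state, and pin the movable CM block to the
 * pre-blend location as a side effect.
 */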
static void dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ctx *pipe_ctx,
		enum MCM_LUT_XABLE *shaper_xable,
		enum MCM_LUT_XABLE *lut3d_xable,
		enum MCM_LUT_XABLE *lut1d_xable)
{
	enum dc_cm2_shaper_3dlut_setting shaper_3dlut_setting = DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL;
	bool lut1d_enable = false;
	struct mpc *mpc = dc->res_pool->mpc;
	int mpcc_id = pipe_ctx->plane_res.hubp->inst;

	if (!pipe_ctx->plane_state)
		return;
	shaper_3dlut_setting = pipe_ctx->plane_state->mcm_shaper_3dlut_setting;
	lut1d_enable = pipe_ctx->plane_state->mcm_lut1d_enable;
	mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
	pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;

	*lut1d_xable = lut1d_enable ? MCM_LUT_ENABLE : MCM_LUT_DISABLE;

	switch (shaper_3dlut_setting) {
	case DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL:
		*lut3d_xable = *shaper_xable = MCM_LUT_DISABLE;
		break;
	case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER:
		*lut3d_xable = MCM_LUT_DISABLE;
		*shaper_xable = MCM_LUT_ENABLE;
		break;
	case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT:
		*lut3d_xable = *shaper_xable = MCM_LUT_ENABLE;
		break;
	}
}

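/* Program the MCM 1D LUT, shaper and 3DLUT for a pipe. The 3DLUT can be
 * sourced from system memory (written through the MPC) or from video
 * memory, in which case it is fast-loaded through the HUBP 3DLUT-FL path.
 */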
void dcn401_populate_mcm_luts(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_cm2_func_luts mcm_luts,
		bool lut_bank_a)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	int mpcc_id = hubp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	union mcm_lut_params m_lut_params;
	enum dc_cm2_transfer_func_source lut3d_src = mcm_luts.lut3d_data.lut3d_src;
	enum hubp_3dlut_fl_format format = 0;
	enum hubp_3dlut_fl_mode mode;
	enum hubp_3dlut_fl_width width = 0;
	enum hubp_3dlut_fl_addressing_mode addr_mode;
	enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0;
	enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0;
	enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0;
	enum MCM_LUT_XABLE shaper_xable = MCM_LUT_DISABLE;
	enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE;
	enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE;
	bool rval;

	dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);

	/* 1D LUT */
	if (mcm_luts.lut1d_func) {
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL)
			m_lut_params.pwl = &mcm_luts.lut1d_func->pwl;
		else if (mcm_luts.lut1d_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
			rval = cm3_helper_translate_curve_to_hw_format(
					mcm_luts.lut1d_func,
					&dpp_base->regamma_params, false);
			m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
		}
		if (m_lut_params.pwl) {
			if (mpc->funcs->populate_lut)
				mpc->funcs->populate_lut(mpc, MCM_LUT_1DLUT, m_lut_params, lut_bank_a, mpcc_id);
		}
		if (mpc->funcs->program_lut_mode)
			mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable && m_lut_params.pwl, lut_bank_a, mpcc_id);
	}

	/* Shaper */
	if (mcm_luts.shaper && mcm_luts.lut3d_data.mpc_3dlut_enable) {
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		if (mcm_luts.shaper->type == TF_TYPE_HWPWL)
			m_lut_params.pwl = &mcm_luts.shaper->pwl;
		else if (mcm_luts.shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
			ASSERT(false);
			rval = cm3_helper_translate_curve_to_hw_format(
					mcm_luts.shaper,
					&dpp_base->regamma_params, true);
			m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
		}
		if (m_lut_params.pwl) {
			if (mpc->funcs->mcm.populate_lut)
				mpc->funcs->mcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id);
			if (mpc->funcs->program_lut_mode)
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_ENABLE, lut_bank_a, mpcc_id);
		}
	}

	/* 3DLUT */
	switch (lut3d_src) {
	case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		if (hubp->funcs->hubp_enable_3dlut_fl)
			hubp->funcs->hubp_enable_3dlut_fl(hubp, false);

		if (mcm_luts.lut3d_data.lut3d_func && mcm_luts.lut3d_data.lut3d_func->state.bits.initialized) {
			m_lut_params.lut3d = &mcm_luts.lut3d_data.lut3d_func->lut_3d;
			if (mpc->funcs->populate_lut)
				mpc->funcs->populate_lut(mpc, MCM_LUT_3DLUT, m_lut_params, lut_bank_a, mpcc_id);
			if (mpc->funcs->program_lut_mode)
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a,
						mpcc_id);
		}
		break;
	case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
		switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
		case DC_CM2_GPU_MEM_SIZE_171717:
			width = hubp_3dlut_fl_width_17;
			break;
		case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
			width = hubp_3dlut_fl_width_transformed;
			break;
		default:
			//TODO: handle default case
			break;
		}

		//check for support
		if (mpc->funcs->mcm.is_config_supported &&
			!mpc->funcs->mcm.is_config_supported(width))
			break;

		if (mpc->funcs->program_lut_read_write_control)
			mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, mpcc_id);
		if (mpc->funcs->program_lut_mode)
			mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a, mpcc_id);

		if (hubp->funcs->hubp_program_3dlut_fl_addr)
			hubp->funcs->hubp_program_3dlut_fl_addr(hubp, mcm_luts.lut3d_data.gpu_mem_params.addr);

		if (mpc->funcs->mcm.program_bit_depth)
			mpc->funcs->mcm.program_bit_depth(mpc, mcm_luts.lut3d_data.gpu_mem_params.bit_depth, mpcc_id);

		switch (mcm_luts.lut3d_data.gpu_mem_params.layout) {
		case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
			mode = hubp_3dlut_fl_mode_native_1;
			addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
			break;
		case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
			mode = hubp_3dlut_fl_mode_native_2;
			addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
			break;
		case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
			mode = hubp_3dlut_fl_mode_transform;
			addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear;
			break;
		default:
			mode = hubp_3dlut_fl_mode_disable;
			addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
			break;
		}
		if (hubp->funcs->hubp_program_3dlut_fl_mode)
			hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode);

		if (hubp->funcs->hubp_program_3dlut_fl_addressing_mode)
			hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode);

		switch (mcm_luts.lut3d_data.gpu_mem_params.format_params.format) {
		case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
			format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
			break;
		case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
			format = hubp_3dlut_fl_format_unorm_12lsb_bitslice;
			break;
		case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10:
			format = hubp_3dlut_fl_format_float_fp1_5_10;
			break;
		}
		if (hubp->funcs->hubp_program_3dlut_fl_format)
			hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
		if (hubp->funcs->hubp_update_3dlut_fl_bias_scale &&
				mpc->funcs->mcm.program_bias_scale) {
			mpc->funcs->mcm.program_bias_scale(mpc,
				mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
				mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale,
				mpcc_id);
			hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
						mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
						mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);
		}

		/* Navi4x has a bug where red and blue are swapped, which needs to be worked around here.
		 * TODO: add a get_xbar method per ASIC, or do the workaround in program_crossbar for 4x.
		 */
		switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) {
		case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
		default:
			crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
			crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
			crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
			break;
		}

		if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
			hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
					crossbar_bit_slice_cr_r,
					crossbar_bit_slice_y_g,
					crossbar_bit_slice_cb_b);

		if (mpc->funcs->mcm.program_lut_read_write_control)
			mpc->funcs->mcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, true, mpcc_id);

		if (mpc->funcs->mcm.program_3dlut_size)
			mpc->funcs->mcm.program_3dlut_size(mpc, width, mpcc_id);

		if (mpc->funcs->update_3dlut_fast_load_select)
			mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);

		if (hubp->funcs->hubp_enable_3dlut_fl)
			hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
		else {
			if (mpc->funcs->program_lut_mode) {
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
			}
		}
		break;
	}
}

void dcn401_trigger_3dlut_dma_load(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;

	if (hubp->funcs->hubp_enable_3dlut_fl) {
		hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
	}
}

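/* Program the per-pipe blend (1D), shaper and 3D LUTs through the MPC.
 * Video-memory-sourced 3DLUTs are delegated to dcn401_populate_mcm_luts.
 */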
bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
				const struct dc_plane_state *plane_state)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
	struct dc *dc = pipe_ctx->stream_res.opp->ctx->dc;
	struct mpc *mpc = dc->res_pool->mpc;
	bool result;
	const struct pwl_params *lut_params = NULL;
	bool rval;

	if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
		dcn401_populate_mcm_luts(dc, pipe_ctx, plane_state->mcm_luts, plane_state->lut_bank_a);
		return true;
	}

	mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
	pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
	// 1D LUT
	if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
		lut_params = &plane_state->blend_tf.pwl;
	else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
		rval = cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf,
				&dpp_base->regamma_params, false);
		lut_params = rval ? &dpp_base->regamma_params : NULL;
	}
	result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
	lut_params = NULL;

	// Shaper
	if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
		lut_params = &plane_state->in_shaper_func.pwl;
	else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
		// TODO: dpp_base replace
		rval = cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func,
				&dpp_base->shaper_params, true);
		lut_params = rval ? &dpp_base->shaper_params : NULL;
	}
	result &= mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);

	// 3D
	if (mpc->funcs->program_3dlut) {
		if (plane_state->lut3d_func.state.bits.initialized == 1)
			result &= mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id);
		else
			result &= mpc->funcs->program_3dlut(mpc, NULL, mpcc_id);
	}

	return result;
}

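/* Program the stream's output transfer function: try the MPC shaper/3DLUT
 * first, and fall back to OGAM (output gamma) when that path is not taken.
 * Only OPP head pipes program these blocks.
 */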
bool dcn401_set_output_transfer_func(struct dc *dc,
				struct pipe_ctx *pipe_ctx,
				const struct dc_stream_state *stream)
{
	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
	const struct pwl_params *params = NULL;
	bool ret = false;

	/* program OGAM or 3DLUT only for the top pipe */
	if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) {
		/* program shaper and 3dlut in MPC */
		ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream);
		if (ret == false && mpc->funcs->set_output_gamma) {
			if (stream->out_transfer_func.type == TF_TYPE_HWPWL)
				params = &stream->out_transfer_func.pwl;
			else if (pipe_ctx->stream->out_transfer_func.type ==
					TF_TYPE_DISTRIBUTED_POINTS &&
					cm3_helper_translate_curve_to_hw_format(
					&stream->out_transfer_func,
					&mpc->blender_params, false))
				params = &mpc->blender_params;
			/* there are no ROM LUTs in OUTGAM */
			if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED)
				BREAK_TO_DEBUGGER();
		}
	}

	if (mpc->funcs->set_output_gamma)
		mpc->funcs->set_output_gamma(mpc, mpcc_id, params);

	return ret;
}

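/* Select the DCCG pixel-rate divider for a stream: TMDS and virtual signals
 * divide by 2 for YCbCr 4:2:0 and by 4 otherwise; everything else uses
 * divide-by-1.
 */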
void dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx *pipe_ctx,
				unsigned int *tmds_div)
{
	struct dc_stream_state *stream = pipe_ctx->stream;

	if (dc_is_tmds_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
		if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
			*tmds_div = PIXEL_RATE_DIV_BY_2;
		else
			*tmds_div = PIXEL_RATE_DIV_BY_4;
	} else {
		*tmds_div = PIXEL_RATE_DIV_BY_1;
	}

	if (*tmds_div == PIXEL_RATE_DIV_NA)
		ASSERT(false);

}

static void enable_stream_timing_calc(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc,
		unsigned int *tmds_div,
		int *opp_inst,
		int *opp_cnt,
		struct pipe_ctx *opp_heads[MAX_PIPES],
		bool *manual_mode,
		struct drr_params *params,
		unsigned int *event_triggers)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	int i;

	if (dc_is_tmds_signal(stream->signal) || dc_is_virtual_signal(stream->signal))
		dcn401_calculate_dccg_tmds_div_value(pipe_ctx, tmds_div);

	*opp_cnt = resource_get_opp_heads_for_otg_master(pipe_ctx, &context->res_ctx, opp_heads);
	for (i = 0; i < *opp_cnt; i++)
		opp_inst[i] = opp_heads[i]->stream_res.opp->inst;

	if (dc_is_tmds_signal(stream->signal)) {
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
		else
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
	}

	params->vertical_total_min = stream->adjust.v_total_min;
	params->vertical_total_max = stream->adjust.v_total_max;
	params->vertical_total_mid = stream->adjust.v_total_mid;
	params->vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num;

	// DRR should set trigger event to monitor surface update event
	if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
		*event_triggers = 0x80;
}

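/* Full timing enable sequence for an OTG master pipe: pixel-rate dividers,
 * ODM combine, DTBCLK source, pixel clock, OTG timing, OPP clocks, blanking,
 * CRTC enable, and DRR/static-screen controls.
 */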
enum dc_status dcn401_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct drr_params params = {0};
	unsigned int event_triggers = 0;
	int opp_cnt = 1;
	int opp_inst[MAX_PIPES] = {0};
	struct pipe_ctx *opp_heads[MAX_PIPES] = {0};
	struct dc_crtc_timing patched_crtc_timing = stream->timing;
	bool manual_mode = false;
	unsigned int tmds_div = PIXEL_RATE_DIV_NA;
	unsigned int unused_div = PIXEL_RATE_DIV_NA;
	int odm_slice_width;
	int last_odm_slice_width;
	int i;

	if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER))
		return DC_OK;

	enable_stream_timing_calc(pipe_ctx, context, dc, &tmds_div, opp_inst,
			&opp_cnt, opp_heads, &manual_mode, &params, &event_triggers);

	if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
		dc->res_pool->dccg->funcs->set_pixel_rate_div(
			dc->res_pool->dccg, pipe_ctx->stream_res.tg->inst,
			tmds_div, unused_div);
	}

	/* TODO check if timing_changed, disable stream if timing changed */

	if (opp_cnt > 1) {
		odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false);
		last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true);
		pipe_ctx->stream_res.tg->funcs->set_odm_combine(
				pipe_ctx->stream_res.tg,
				opp_inst, opp_cnt,
				odm_slice_width, last_odm_slice_width);
	}

	/* set DTBCLK_P */
	if (dc->res_pool->dccg->funcs->set_dtbclk_p_src) {
		if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
			dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, DPREFCLK, pipe_ctx->stream_res.tg->inst);
		}
	}

	/* The HW programming guide assumes the display was already disabled
	 * by the unplug sequence and that the OTG is stopped.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
		dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);

	/* if we are padding, h_addressable needs to be adjusted */
	if (dc->debug.enable_hblank_borrow) {
		patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding;
		patched_crtc_timing.h_total = patched_crtc_timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding;
		patched_crtc_timing.pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz;
	}

	pipe_ctx->stream_res.tg->funcs->program_timing(
		pipe_ctx->stream_res.tg,
		&patched_crtc_timing,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vready_offset_pixels,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
		(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines,
		pipe_ctx->stream->signal,
		true);

	for (i = 0; i < opp_cnt; i++) {
		opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control(
				opp_heads[i]->stream_res.opp,
				true);
		opp_heads[i]->stream_res.opp->funcs->opp_program_left_edge_extra_pixel(
				opp_heads[i]->stream_res.opp,
				stream->timing.pixel_encoding,
				resource_is_pipe_type(opp_heads[i], OTG_MASTER));
	}

	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	hws->funcs.blank_pixel_data(dc, pipe_ctx, true);

	/* VTG is within the DCHUB command block. DCFCLK is always on. */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
	set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);

	/* Event triggers and num frames are initialized for DRR, but can be
	 * updated later for PSR use. Note that DRR trigger events are generated
	 * regardless of whether the num frames threshold is met.
	 */
	if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
		pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
				pipe_ctx->stream_res.tg, event_triggers, 2);

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	if (dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
		if (pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable)
			pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg);
	}

	return DC_OK;
}

static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
{
	switch (link->link_enc->transmitter) {
	case TRANSMITTER_UNIPHY_A:
		return PHYD32CLKA;
	case TRANSMITTER_UNIPHY_B:
		return PHYD32CLKB;
	case TRANSMITTER_UNIPHY_C:
		return PHYD32CLKC;
	case TRANSMITTER_UNIPHY_D:
		return PHYD32CLKD;
	case TRANSMITTER_UNIPHY_E:
		return PHYD32CLKE;
	default:
		return PHYD32CLKA;
	}
}

static void dcn401_enable_stream_calc(
		struct pipe_ctx *pipe_ctx,
		int *dp_hpo_inst,
		enum phyd32clk_clock_source *phyd32clk,
		unsigned int *tmds_div,
		uint32_t *early_control)
{

	struct dc *dc = pipe_ctx->stream->ctx->dc;
	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	enum dc_lane_count lane_count =
			pipe_ctx->stream->link->cur_link_settings.lane_count;
	uint32_t active_total_with_borders;

	if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx))
		*dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;

	*phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link);

	if (dc_is_tmds_signal(pipe_ctx->stream->signal))
		dcn401_calculate_dccg_tmds_div_value(pipe_ctx, tmds_div);
	else
		*tmds_div = PIXEL_RATE_DIV_BY_1;

	/* enable early control to avoid corruption on DP monitor */
	active_total_with_borders =
			timing->h_addressable
				+ timing->h_border_left
				+ timing->h_border_right;

	if (lane_count != 0)
		*early_control = active_total_with_borders % lane_count;

	if (*early_control == 0)
		*early_control = lane_count;

}

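/* Enable the stream path: route stream and symbol clocks in the DCCG
 * (128b/132b DP uses the HPO path), set the pixel-rate divider, set up the
 * stream encoder, and program info frames and early control.
 */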
void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
{
	uint32_t early_control = 0;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	struct dc_link *link = pipe_ctx->stream->link;
	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
	struct dc *dc = pipe_ctx->stream->ctx->dc;
	struct dccg *dccg = dc->res_pool->dccg;
	enum phyd32clk_clock_source phyd32clk;
	int dp_hpo_inst = 0;
	unsigned int tmds_div = PIXEL_RATE_DIV_NA;
	unsigned int unused_div = PIXEL_RATE_DIV_NA;
	struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
	struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;

	if (!dc->config.unify_link_enc_assignment)
		link_enc = link_enc_cfg_get_link_enc(link);

	dcn401_enable_stream_calc(pipe_ctx, &dp_hpo_inst, &phyd32clk,
				&tmds_div, &early_control);

	if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) {
		if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
			dccg->funcs->set_dpstreamclk(dccg, DPREFCLK, tg->inst, dp_hpo_inst);
			if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) {
				dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
			} else {
				dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
			}
		} else {
			dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
					link_enc->transmitter - TRANSMITTER_UNIPHY_A);
		}
	}

	if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
		dc->res_pool->dccg->funcs->set_pixel_rate_div(
			dc->res_pool->dccg,
			pipe_ctx->stream_res.tg->inst,
			tmds_div,
			unused_div);
	}

	link_hwss->setup_stream_encoder(pipe_ctx);

	if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
		if (dc->hwss.program_dmdata_engine)
			dc->hwss.program_dmdata_engine(pipe_ctx);
	}

	dc->hwss.update_info_frame(pipe_ctx);

	if (dc_is_dp_signal(pipe_ctx->stream->signal))
		dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);

	tg->funcs->set_early_control(tg, early_control);
}

void dcn401_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
{
	REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, enable);
}

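/* With 2x cursor magnification, the hotspot is halved and then nudged by
 * one pixel (two for cursors wider than 128) so the cursor stays aligned
 * when it crosses a slice boundary.
 */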
void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy)
{
	if (cursor_width <= 128) {
		pos_cpy->x_hotspot /= 2;
		pos_cpy->x_hotspot += 1;
	} else {
		pos_cpy->x_hotspot /= 2;
		pos_cpy->x_hotspot += 2;
	}
}

static void disable_link_output_symclk_on_tx_off(struct dc_link *link, enum dp_link_encoding link_encoding)
{
	struct dc *dc = link->ctx->dc;
	struct pipe_ctx *pipe_ctx = NULL;
	uint8_t i;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
			pipe_ctx->clock_source->funcs->program_pix_clk(
					pipe_ctx->clock_source,
					&pipe_ctx->stream_res.pix_clk_params,
					link_encoding,
					&pipe_ctx->pll_settings);
			break;
		}
	}
}

void dcn401_disable_link_output(struct dc_link *link,
		const struct link_resource *link_res,
		enum signal_type signal)
{
	struct dc *dc = link->ctx->dc;
	const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
	struct dmcu *dmcu = dc->res_pool->dmcu;

	if (signal == SIGNAL_TYPE_EDP &&
			link->dc->hwss.edp_backlight_control &&
			!link->skip_implict_edp_power_control)
		link->dc->hwss.edp_backlight_control(link, false);
	else if (dmcu != NULL && dmcu->funcs->lock_phy)
		dmcu->funcs->lock_phy(dmcu);

	if (dc_is_tmds_signal(signal) && link->phy_state.symclk_ref_cnts.otg > 0) {
		disable_link_output_symclk_on_tx_off(link, DP_UNKNOWN_ENCODING);
		link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
	} else {
		link_hwss->disable_link_output(link, link_res, signal);
		link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
	}

	if (signal == SIGNAL_TYPE_EDP &&
			link->dc->hwss.edp_backlight_control &&
			!link->skip_implict_edp_power_control)
		link->dc->hwss.edp_power_control(link, false);
	else if (dmcu != NULL && dmcu->funcs->lock_phy)
		dmcu->funcs->unlock_phy(dmcu);

	dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}

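/* Translate the cursor position from stream->src space into per-pipe recout
 * space and program it into the HUBP and DPP. DCN4 composites the cursor
 * after the scaler, so ODM/MPC slice offsets and hotspot clamping are
 * handled here, and the cursor is disabled when it is not visible within
 * this pipe's recout.
 */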
void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.recout = pipe_ctx->plane_res.scl_data.recout,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror,
		.stream = pipe_ctx->stream
	};
	struct rect odm_slice_src = { 0 };
	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
		(pipe_ctx->prev_odm_pipe != NULL);
	int prev_odm_width = 0;
	struct pipe_ctx *prev_odm_pipe = NULL;
	bool mpc_combine_on = false;
	int  bottom_pipe_x_pos = 0;

	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;
	int recout_x_pos = 0;
	int recout_y_pos = 0;

	if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
		if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
			(pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
			mpc_combine_on = true;
		}
	}

	/* DCN4 moved cursor composition after the scaler, so in HW the cursor
	 * is in recout space, and for HW cursor position programming we need
	 * to translate into recout space.
	 *
	 * The cursor X and Y positions programmed into HW can't be negative;
	 * in fact it is the X, Y coordinate shifted by the HW cursor hotspot
	 * position that goes into the HW X and Y coordinates, while the HW
	 * hotspot X and Y coordinates are lengths relative to the cursor's
	 * top left corner, so the hotspot must be smaller than the cursor
	 * size.
	 *
	 * The DM/DC interface for cursor position is in stream->src space,
	 * and DMs are supposed to transform cursor coordinates into
	 * stream->src space. Here we then need to translate cursor
	 * coordinates into stream->dst space, since in HW cursor coordinates
	 * are in per-pipe recout space, and for a given pipe the valid
	 * coordinates only range from (0,0) to (recout width, recout height).
	 * If certain pipe combining is in place, we need to further adjust
	 * per pipe to make sure each pipe enables the cursor on its part of
	 * the screen.
	 */
	x_pos = pipe_ctx->stream->dst.x + x_pos * pipe_ctx->stream->dst.width /
		pipe_ctx->stream->src.width;
	y_pos = pipe_ctx->stream->dst.y + y_pos * pipe_ctx->stream->dst.height /
		pipe_ctx->stream->src.height;

	/* If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
	 *
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 *
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	 */
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;
	}

	/* Adjust for ODM Combine
	 * next/prev_odm_offset is to account for scaled modes that have underscan
	 */
	if (odm_combine_on) {
		prev_odm_pipe = pipe_ctx->prev_odm_pipe;

		while (prev_odm_pipe != NULL) {
			odm_slice_src = resource_get_odm_slice_src_rect(prev_odm_pipe);
			prev_odm_width += odm_slice_src.width;
			prev_odm_pipe = prev_odm_pipe->prev_odm_pipe;
		}

		x_pos -= (prev_odm_width);
	}

	/* If the position is negative then we need to add to the hotspot
	 * to fix the cursor size between ODM slices
	 */

	if (x_pos < 0) {
		pos_cpy.x_hotspot -= x_pos;
		if (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION)
			adjust_hotspot_between_slices_for_2x_magnify(hubp->curs_attr.width, &pos_cpy);
		x_pos = 0;
	}

	if (y_pos < 0) {
		pos_cpy.y_hotspot -= y_pos;
		y_pos = 0;
	}

	/* If the position on the bottom MPC pipe is negative then we need to add to the hotspot and
	 * adjust x_pos on the bottom pipe to make the cursor visible when crossing between MPC slices.
	 */
	if (mpc_combine_on &&
		pipe_ctx->top_pipe &&
		(pipe_ctx == pipe_ctx->top_pipe->bottom_pipe)) {

		bottom_pipe_x_pos = x_pos - pipe_ctx->plane_res.scl_data.recout.x;
		if (bottom_pipe_x_pos < 0) {
			x_pos = pipe_ctx->plane_res.scl_data.recout.x;
			pos_cpy.x_hotspot -= bottom_pipe_x_pos;
			if (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION)
				adjust_hotspot_between_slices_for_2x_magnify(hubp->curs_attr.width, &pos_cpy);
		}
	}

	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	if (pos_cpy.enable && resource_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;

	x_pos = pos_cpy.x - param.recout.x;
	y_pos = pos_cpy.y - param.recout.y;

	recout_x_pos = x_pos - pos_cpy.x_hotspot;
	recout_y_pos = y_pos - pos_cpy.y_hotspot;

	if (recout_x_pos >= (int)param.recout.width)
		pos_cpy.enable = false;  /* not visible beyond right edge */

	if (recout_y_pos >= (int)param.recout.height)
		pos_cpy.enable = false;  /* not visible beyond bottom edge */

	if (recout_x_pos + (int)hubp->curs_attr.width <= 0)
		pos_cpy.enable = false;  /* not visible beyond left edge */

	if (recout_y_pos + (int)hubp->curs_attr.height <= 0)
		pos_cpy.enable = false;  /* not visible beyond top edge */

	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}

static bool dcn401_check_no_memory_request_for_cab(struct dc *dc)
{
	int i;

	/* First, check no-memory-request case */
	for (i = 0; i < dc->current_state->stream_count; i++) {
		if ((dc->current_state->stream_status[i].plane_count) &&
			(dc->current_state->streams[i]->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED))
			/* Fail eligibility on a visible stream */
			return false;
	}

	return true;
}

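/* Compute how many MALL/CAB cache ways the given state needs: start from
 * the precomputed MALL static-screen size, add cursor surfaces, then
 * convert bytes to ways (unless a debug override forces a way count).
 */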
static uint32_t dcn401_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
{
	int i;
	uint8_t num_ways = 0;
	uint32_t mall_ss_size_bytes = 0;

	mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes;
	// TODO add additional logic for PSR active stream exclusion optimization
	// mall_ss_psr_active_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes;

	// Include cursor size for CAB allocation
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[i];

		if (!pipe->stream || !pipe->plane_state)
			continue;

		mall_ss_size_bytes += dcn32_helper_calculate_mall_bytes_for_cursor(dc, pipe, false);
	}

	// Convert number of cache lines required to number of ways
	if (dc->debug.force_mall_ss_num_ways > 0)
		num_ways = dc->debug.force_mall_ss_num_ways;
	else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes)
		num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, mall_ss_size_bytes);
	else
		num_ways = 0;

	return num_ways;
}

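/* Enable or disable MALL static-screen idle optimizations by sending the
 * matching CAB command to DMUB. MALL SS is refused while PSR is in use or
 * when any plane uses a Stereo3D or TMZ surface.
 */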
bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
	union dmub_rb_cmd cmd;
	uint8_t ways, i;
	int j;
	bool mall_ss_unsupported = false;
	struct dc_plane_state *plane = NULL;

	if (!dc->ctx->dmub_srv || !dc->current_state)
		return false;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		/* MALL SS messaging is not supported with PSR at this time */
		if (dc->current_state->streams[i] != NULL &&
				dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
			DC_LOG_MALL("MALL SS not supported with PSR at this time\n");
			return false;
		}
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
	cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);

	if (enable) {
		if (dcn401_check_no_memory_request_for_cab(dc)) {
			/* 1. Check the no-memory-request case for CAB.
			 * If it applies, send the CAB_ACTION NO_DCN_REQ DMUB message.
			 */
			DC_LOG_MALL("sending CAB action NO_DCN_REQ\n");
			cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ;
		} else {
			/* 2. Check if all surfaces can fit in CAB.
			 * If the surfaces fit, send the CAB_ACTION_ALLOW DMUB message
			 * and configure the HUBPs to fetch from MALL.
			 */
			ways = dcn401_calculate_cab_allocation(dc, dc->current_state);

			/* MALL is not supported with Stereo3D or TMZ surfaces. If any plane is using a stereo
			 * or TMZ surface, don't try to enter MALL.
			 */
			for (i = 0; i < dc->current_state->stream_count; i++) {
				for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
					plane = dc->current_state->stream_status[i].plane_states[j];

					if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO ||
							plane->address.tmz_surface) {
						mall_ss_unsupported = true;
						break;
					}
				}
				if (mall_ss_unsupported)
					break;
			}
			if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) {
				cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
				cmd.cab.cab_alloc_ways = ways;
				DC_LOG_MALL("cab allocation: %d ways. CAB action: DCN_SS_FIT_IN_CAB\n", ways);
			} else {
				cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_NOT_FIT_IN_CAB;
				DC_LOG_MALL("frame does not fit in CAB: %d ways required. CAB action: DCN_SS_NOT_FIT_IN_CAB\n", ways);
			}
		}
	} else {
		/* Disable CAB */
		cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION;
		DC_LOG_MALL("idle optimization disabled\n");
	}

	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return true;
}

void dcn401_wait_for_dcc_meta_propagation(const struct dc *dc,
		const struct pipe_ctx *top_pipe)
{
	bool is_wait_needed = false;
	const struct pipe_ctx *pipe_ctx = top_pipe;

	/* check if any surfaces are updating address while using flip immediate and dcc */
	while (pipe_ctx != NULL) {
		if (pipe_ctx->plane_state &&
				pipe_ctx->plane_state->dcc.enable &&
				pipe_ctx->plane_state->flip_immediate &&
				pipe_ctx->plane_state->update_flags.bits.addr_update) {
			is_wait_needed = true;
			break;
		}

		/* check next pipe */
		pipe_ctx = pipe_ctx->bottom_pipe;
	}

	if (is_wait_needed && dc->debug.dcc_meta_propagation_delay_us > 0) {
		udelay(dc->debug.dcc_meta_propagation_delay_us);
	}
}

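/* Prepare for a transition that may need more bandwidth: temporarily
 * disable P-State support, raise clocks, program watermarks and arbiter
 * thresholds for the worst case, shrink the compressed buffer, and update
 * the FAMS2 configuration.
 */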
dcn401_prepare_bandwidth(struct dc * dc,struct dc_state * context)1365 void dcn401_prepare_bandwidth(struct dc *dc,
1366 	struct dc_state *context)
1367 {
1368 	struct hubbub *hubbub = dc->res_pool->hubbub;
1369 	bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
1370 	unsigned int compbuf_size = 0;
1371 
1372 	/* Any transition into P-State support should disable MCLK switching first to avoid hangs */
1373 	if (p_state_change_support) {
1374 		dc->optimized_required = true;
1375 		context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
1376 	}
1377 
1378 	if (dc->clk_mgr->dc_mode_softmax_enabled)
1379 		if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
1380 				context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
1381 			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
1382 
1383 	/* Increase clocks */
1384 	dc->clk_mgr->funcs->update_clocks(
1385 			dc->clk_mgr,
1386 			context,
1387 			false);
1388 
1389 	/* program dchubbub watermarks:
1390 	 * For assigning optimized_required, use |= operator since we don't want
1391 	 * to clear the value if the optimize has not happened yet
1392 	 */
1393 	dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
1394 					&context->bw_ctx.bw.dcn.watermarks,
1395 					dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
1396 					false);
1397 	/* update timeout thresholds */
1398 	if (hubbub->funcs->program_arbiter) {
1399 		dc->optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false);
1400 	}
1401 
1402 	/* decrease compbuf size */
1403 	if (hubbub->funcs->program_compbuf_segments) {
1404 		compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
1405 		dc->optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);
1406 
1407 		hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size, false);
1408 	}
1409 
1410 	if (dc->debug.fams2_config.bits.enable) {
1411 		dcn401_dmub_hw_control_lock(dc, context, true);
1412 		dcn401_fams2_update_config(dc, context, false);
1413 		dcn401_dmub_hw_control_lock(dc, context, false);
1414 	}
1415 
1416 	if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) {
1417 		/* After disabling P-State, restore the original value to ensure we get the correct P-State
1418 		 * on the next optimize. */
1419 		context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
1420 	}
1421 }
1422 
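/* Post-update counterpart to dcn401_prepare_bandwidth(): once the new state
 * is active it is safe to lower watermarks, grow the compressed buffer and
 * drop clocks, so everything here is programmed with safe_to_lower = true.
 */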
1423 void dcn401_optimize_bandwidth(
1424 		struct dc *dc,
1425 		struct dc_state *context)
1426 {
1427 	int i;
1428 	struct hubbub *hubbub = dc->res_pool->hubbub;
1429 
1430 	/* enable fams2 if needed */
1431 	if (dc->debug.fams2_config.bits.enable) {
1432 		dcn401_dmub_hw_control_lock(dc, context, true);
1433 		dcn401_fams2_update_config(dc, context, true);
1434 		dcn401_dmub_hw_control_lock(dc, context, false);
1435 	}
1436 
1437 	/* program dchubbub watermarks */
1438 	hubbub->funcs->program_watermarks(hubbub,
1439 					&context->bw_ctx.bw.dcn.watermarks,
1440 					dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
1441 					true);
1442 	/* update timeout thresholds */
1443 	if (hubbub->funcs->program_arbiter) {
1444 		hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, true);
1445 	}
1446 
1447 	if (dc->clk_mgr->dc_mode_softmax_enabled)
1448 		if (dc->clk_mgr->clks.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
1449 				context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
1450 			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
1451 
1452 	/* increase compbuf size */
1453 	if (hubbub->funcs->program_compbuf_segments)
1454 		hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);
1455 
1456 	dc->clk_mgr->funcs->update_clocks(
1457 			dc->clk_mgr,
1458 			context,
1459 			true);
1460 	if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
1461 		for (i = 0; i < dc->res_pool->pipe_count; ++i) {
1462 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1463 
1464 			if (pipe_ctx->stream && pipe_ctx->plane_res.hubp->funcs->program_extended_blank
1465 				&& pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
1466 				&& pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
1467 					pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
1468 						pipe_ctx->dlg_regs.min_dst_y_next_start);
1469 		}
1470 	}
1471 }
1472 
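/* Take or release the DMUB HW lock around updates the firmware must not
 * observe mid-flight (FAMS2 config updates, cursor offload). Typical
 * bracketing, as used in the bandwidth functions above:
 *
 *	dcn401_dmub_hw_control_lock(dc, context, true);
 *	dcn401_fams2_update_config(dc, context, false);
 *	dcn401_dmub_hw_control_lock(dc, context, false);
 */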
1473 void dcn401_dmub_hw_control_lock(struct dc *dc,
1474 		struct dc_state *context,
1475 		bool lock)
1476 {
1477 	/* always use the DMUB HW lock for now */
1478 	union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
1479 
1480 	if (!dc->ctx || !dc->ctx->dmub_srv)
1481 		return;
1482 
1483 	if (!dc->debug.fams2_config.bits.enable && !dc_dmub_srv_is_cursor_offload_enabled(dc))
1484 		return;
1485 
1486 	hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
1487 	hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
1488 	hw_lock_cmd.bits.lock = lock;
1489 	hw_lock_cmd.bits.should_release = !lock;
1490 	dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
1491 }
1492 
1493 void dcn401_dmub_hw_control_lock_fast(union block_sequence_params *params)
1494 {
1495 	struct dc *dc = params->dmub_hw_control_lock_fast_params.dc;
1496 	bool lock = params->dmub_hw_control_lock_fast_params.lock;
1497 
1498 	if (params->dmub_hw_control_lock_fast_params.is_required) {
1499 		union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
1500 
1501 		hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
1502 		hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
1503 		hw_lock_cmd.bits.lock = lock;
1504 		hw_lock_cmd.bits.should_release = !lock;
1505 		dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
1506 	}
1507 }
1508 
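/* Forward the FAMS2 enable/disable request to DMUB. The request is only
 * sent when the fams2_config debug switch is on, and FAMS2 is only enabled
 * if the state's global config actually requires it.
 */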
1509 void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable)
1510 {
1511 	bool fams2_required;
1512 
1513 	if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable)
1514 		return;
1515 
1516 	fams2_required = context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable;
1517 
1518 	dc_dmub_srv_fams2_update_config(dc, context, enable && fams2_required);
1519 }
1520 
1521 static void update_dsc_for_odm_change(struct dc *dc, struct dc_state *context,
1522 		struct pipe_ctx *otg_master)
1523 {
1524 	int i;
1525 	struct pipe_ctx *old_pipe;
1526 	struct pipe_ctx *new_pipe;
1527 	struct pipe_ctx *old_opp_heads[MAX_PIPES];
1528 	struct pipe_ctx *old_otg_master;
1529 	int old_opp_head_count = 0;
1530 
1531 	old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx];
1532 
1533 	if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) {
1534 		old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master,
1535 									   &dc->current_state->res_ctx,
1536 									   old_opp_heads);
1537 	} else {
1538 		// DC cannot assume that the current state and the new state
1539 		// share the same OTG pipe, since this does not hold when called
1540 		// while committing a stream that has not been checked. Hence,
1541 		// set old_otg_master to NULL to skip the old DSC configuration.
1542 		old_otg_master = NULL;
1543 	}
1544 
1546 	if (otg_master->stream_res.dsc)
1547 		dcn32_update_dsc_on_stream(otg_master,
1548 				otg_master->stream->timing.flags.DSC);
1549 	if (old_otg_master && old_otg_master->stream_res.dsc) {
1550 		for (i = 0; i < old_opp_head_count; i++) {
1551 			old_pipe = old_opp_heads[i];
1552 			new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx];
1553 			if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc)
1554 				old_pipe->stream_res.dsc->funcs->dsc_disconnect(
1555 						old_pipe->stream_res.dsc);
1556 		}
1557 	}
1558 }
1559 
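/* Immediate (non-sequence) ODM reprogramming: select ODM combine or bypass
 * on the OTG master, re-enable OPP clocks and left-edge extra pixel handling
 * for every OPP head, update DSC for the new slice count, and reprogram the
 * blank pattern (generated by OPP) when the master has no DPP.
 */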
1560 void dcn401_update_odm(struct dc *dc, struct dc_state *context,
1561 		struct pipe_ctx *otg_master)
1562 {
1563 	struct pipe_ctx *opp_heads[MAX_PIPES];
1564 	int opp_inst[MAX_PIPES] = {0};
1565 	int opp_head_count;
1566 	int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false);
1567 	int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true);
1568 	int i;
1569 
1570 	opp_head_count = resource_get_opp_heads_for_otg_master(
1571 			otg_master, &context->res_ctx, opp_heads);
1572 
1573 	for (i = 0; i < opp_head_count; i++)
1574 		opp_inst[i] = opp_heads[i]->stream_res.opp->inst;
1575 	if (opp_head_count > 1)
1576 		otg_master->stream_res.tg->funcs->set_odm_combine(
1577 				otg_master->stream_res.tg,
1578 				opp_inst, opp_head_count,
1579 				odm_slice_width, last_odm_slice_width);
1580 	else
1581 		otg_master->stream_res.tg->funcs->set_odm_bypass(
1582 				otg_master->stream_res.tg,
1583 				&otg_master->stream->timing);
1584 
1585 	for (i = 0; i < opp_head_count; i++) {
1586 		opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control(
1587 				opp_heads[i]->stream_res.opp,
1588 				true);
1589 		opp_heads[i]->stream_res.opp->funcs->opp_program_left_edge_extra_pixel(
1590 				opp_heads[i]->stream_res.opp,
1591 				opp_heads[i]->stream->timing.pixel_encoding,
1592 				resource_is_pipe_type(opp_heads[i], OTG_MASTER));
1593 	}
1594 
1595 	update_dsc_for_odm_change(dc, context, otg_master);
1596 
1597 	if (!resource_is_pipe_type(otg_master, DPP_PIPE))
1598 		/*
1599 		 * blank pattern is generated by OPP, reprogram blank pattern
1600 		 * due to OPP count change
1601 		 */
1602 		dc->hwseq->funcs.blank_pixel_data(dc, otg_master, true);
1603 }
1604 
1605 static void dcn401_add_dsc_sequence_for_odm_change(struct dc *dc, struct dc_state *context,
1606 		struct pipe_ctx *otg_master, struct block_sequence_state *seq_state)
1607 {
1608 	struct pipe_ctx *old_pipe;
1609 	struct pipe_ctx *new_pipe;
1610 	struct pipe_ctx *old_opp_heads[MAX_PIPES];
1611 	struct pipe_ctx *old_otg_master;
1612 	int old_opp_head_count = 0;
1613 	int i;
1614 
1615 	old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx];
1616 
1617 	if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) {
1618 		old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master,
1619 			&dc->current_state->res_ctx,
1620 			old_opp_heads);
1621 	} else {
1622 		old_otg_master = NULL;
1623 	}
1624 
1625 	/* Process new DSC configuration if DSC is enabled */
1626 	if (otg_master->stream_res.dsc && otg_master->stream->timing.flags.DSC) {
1627 		struct dc_stream_state *stream = otg_master->stream;
1628 		struct pipe_ctx *odm_pipe;
1629 		int opp_cnt = 1;
1630 		int last_dsc_calc = 0;
1631 		bool should_use_dto_dscclk = (dc->res_pool->dccg->funcs->set_dto_dscclk != NULL) &&
1632 				stream->timing.pix_clk_100hz > 480000;
1633 
1634 		/* Count ODM pipes */
1635 		for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
1636 			opp_cnt++;
1637 
1638 		int num_slices_h = stream->timing.dsc_cfg.num_slices_h / opp_cnt;
1639 
1640 		/* Step 1: Set DTO DSCCLK for main DSC if needed */
1641 		if (should_use_dto_dscclk) {
1642 			hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg,
1643 					otg_master->stream_res.dsc->inst, num_slices_h);
1644 		}
1645 
1646 		/* Step 2: Calculate and set DSC config for main DSC */
1647 		last_dsc_calc = *seq_state->num_steps;
1648 		hwss_add_dsc_calculate_and_set_config(seq_state, otg_master, true, opp_cnt);
1649 
1650 		/* Step 3: Enable main DSC block */
1651 		hwss_add_dsc_enable_with_opp(seq_state, otg_master);
1652 
1653 		/* Step 4: Configure and enable ODM DSC blocks */
1654 		for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
1655 			if (!odm_pipe->stream_res.dsc)
1656 				continue;
1657 
1658 			/* Set DTO DSCCLK for ODM DSC if needed */
1659 			if (should_use_dto_dscclk) {
1660 				hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg,
1661 						odm_pipe->stream_res.dsc->inst, num_slices_h);
1662 			}
1663 
1664 			/* Calculate and set DSC config for ODM DSC */
1665 			last_dsc_calc = *seq_state->num_steps;
1666 			hwss_add_dsc_calculate_and_set_config(seq_state, odm_pipe, true, opp_cnt);
1667 
1668 			/* Enable ODM DSC block */
1669 			hwss_add_dsc_enable_with_opp(seq_state, odm_pipe);
1670 		}
1671 
1672 		/* Step 5: Configure DSC in timing generator */
1673 		hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg,
1674 			&seq_state->steps[last_dsc_calc].params.dsc_calculate_and_set_config_params.dsc_optc_cfg, true);
1675 	} else if (otg_master->stream_res.dsc && !otg_master->stream->timing.flags.DSC) {
1676 		/* Disable DSC in OPTC */
1677 		hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg, NULL, false);
1678 
1679 		hwss_add_dsc_disconnect(seq_state, otg_master->stream_res.dsc);
1680 	}
1681 
1682 	/* Disable DSC for old pipes that no longer need it */
1683 	if (old_otg_master && old_otg_master->stream_res.dsc) {
1684 		for (i = 0; i < old_opp_head_count; i++) {
1685 			old_pipe = old_opp_heads[i];
1686 			new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx];
1687 
1688 			/* If old pipe had DSC but new pipe doesn't, disable the old DSC */
1689 			if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc) {
1690 				/* Then disconnect DSC block */
1691 				hwss_add_dsc_disconnect(seq_state, old_pipe->stream_res.dsc);
1692 			}
1693 		}
1694 	}
1695 }
1696 
1697 void dcn401_update_odm_sequence(struct dc *dc, struct dc_state *context,
1698 		struct pipe_ctx *otg_master, struct block_sequence_state *seq_state)
1699 {
1700 	struct pipe_ctx *opp_heads[MAX_PIPES];
1701 	int opp_inst[MAX_PIPES] = {0};
1702 	int opp_head_count;
1703 	int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false);
1704 	int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true);
1705 	int i;
1706 
1707 	opp_head_count = resource_get_opp_heads_for_otg_master(
1708 			otg_master, &context->res_ctx, opp_heads);
1709 
1710 	for (i = 0; i < opp_head_count; i++)
1711 		opp_inst[i] = opp_heads[i]->stream_res.opp->inst;
1712 
1713 	/* Add ODM combine/bypass operation to sequence */
1714 	if (opp_head_count > 1) {
1715 		hwss_add_optc_set_odm_combine(seq_state, otg_master->stream_res.tg, opp_inst,
1716 			opp_head_count, odm_slice_width, last_odm_slice_width);
1717 	} else {
1718 		hwss_add_optc_set_odm_bypass(seq_state, otg_master->stream_res.tg, &otg_master->stream->timing);
1719 	}
1720 
1721 	/* Add OPP operations to sequence */
1722 	for (i = 0; i < opp_head_count; i++) {
1723 		/* Add OPP pipe clock control operation */
1724 		hwss_add_opp_pipe_clock_control(seq_state, opp_heads[i]->stream_res.opp, true);
1725 
1726 		/* Add OPP program left edge extra pixel operation */
1727 		hwss_add_opp_program_left_edge_extra_pixel(seq_state, opp_heads[i]->stream_res.opp,
1728 			opp_heads[i]->stream->timing.pixel_encoding, resource_is_pipe_type(opp_heads[i], OTG_MASTER));
1729 	}
1730 
1731 	/* Add DSC update operations to sequence */
1732 	dcn401_add_dsc_sequence_for_odm_change(dc, context, otg_master, seq_state);
1733 
1734 	/* Add blank pixel data operation if needed */
1735 	if (!resource_is_pipe_type(otg_master, DPP_PIPE)) {
1736 		if (dc->hwseq->funcs.blank_pixel_data_sequence)
1737 			dc->hwseq->funcs.blank_pixel_data_sequence(
1738 				dc, otg_master, true, seq_state);
1739 	}
1740 }
1741 
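/* Unblank the stream on the HPO stream encoder for 128b/132b links, or on
 * the DIO stream encoder for other DP signals, then re-enable the eDP
 * backlight when the local sink is eDP.
 */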
1742 void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx,
1743 		struct dc_link_settings *link_settings)
1744 {
1745 	struct encoder_unblank_param params = {0};
1746 	struct dc_stream_state *stream = pipe_ctx->stream;
1747 	struct dc_link *link = stream->link;
1748 	struct dce_hwseq *hws = link->dc->hwseq;
1749 
1750 	/* calculate parameters for unblank */
1751 	params.opp_cnt = resource_get_odm_slice_count(pipe_ctx);
1752 
1753 	params.timing = pipe_ctx->stream->timing;
1754 	params.link_settings.link_rate = link_settings->link_rate;
1755 	params.pix_per_cycle = pipe_ctx->stream_res.pix_clk_params.dio_se_pix_per_cycle;
1756 
1757 	if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
1758 		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
1759 				pipe_ctx->stream_res.hpo_dp_stream_enc,
1760 				pipe_ctx->stream_res.tg->inst);
1761 	} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
1762 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
1763 	}
1764 
1765 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
1766 		hws->funcs.edp_backlight_control(link, true);
1767 }
1768 
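/* Release hardware control. Unless the debug override is set, FAMS2 is
 * disabled in firmware first, and P-State support is then force-allowed by
 * DCN since firmware may otherwise still be holding P-State for display.
 */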
1769 void dcn401_hardware_release(struct dc *dc)
1770 {
1771 	if (!dc->debug.disable_force_pstate_allow_on_hw_release) {
1772 		dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
1773 
1774 		/* If P-State is unsupported, or is still being supported
1775 		 * by firmware, force it supported by DCN.
1776 		 */
1777 		if (dc->current_state) {
1778 			if ((!dc->clk_mgr->clks.p_state_change_support ||
1779 					dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
1780 					dc->res_pool->hubbub->funcs->force_pstate_change_control)
1781 				dc->res_pool->hubbub->funcs->force_pstate_change_control(
1782 						dc->res_pool->hubbub, true, true);
1783 
1784 			dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
1785 			dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
1786 		}
1787 	} else {
1788 		if (dc->current_state) {
1789 			dc->clk_mgr->clks.p_state_change_support = false;
1790 			dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
1791 		}
1792 		dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
1793 	}
1794 }
1795 
1796 void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master)
1797 {
1798 	struct pipe_ctx *opp_heads[MAX_PIPES];
1799 	struct pipe_ctx *dpp_pipes[MAX_PIPES];
1800 	struct hubbub *hubbub = dc->res_pool->hubbub;
1801 	int dpp_count = 0;
1802 
1803 	if (!otg_master->stream)
1804 		return;
1805 
1806 	int slice_count = resource_get_opp_heads_for_otg_master(otg_master,
1807 			&context->res_ctx, opp_heads);
1808 
1809 	for (int slice_idx = 0; slice_idx < slice_count; slice_idx++) {
1810 		if (opp_heads[slice_idx]->plane_state) {
1811 			dpp_count = resource_get_dpp_pipes_for_opp_head(
1812 					opp_heads[slice_idx],
1813 					&context->res_ctx,
1814 					dpp_pipes);
1815 			for (int dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
1816 				struct pipe_ctx *dpp_pipe = dpp_pipes[dpp_idx];
1817 				if (dpp_pipe && hubbub &&
1818 						dpp_pipe->plane_res.hubp &&
1819 						hubbub->funcs->wait_for_det_update)
1820 					hubbub->funcs->wait_for_det_update(hubbub, dpp_pipe->plane_res.hubp->inst);
1821 			}
1822 		} else {
1823 			if (hubbub && opp_heads[slice_idx]->plane_res.hubp && hubbub->funcs->wait_for_det_update)
1824 				hubbub->funcs->wait_for_det_update(hubbub, opp_heads[slice_idx]->plane_res.hubp->inst);
1825 		}
1826 	}
1827 }
1828 
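/* Lock or unlock all enabled, non-phantom OTG masters. Unlocking is done in
 * two passes: pipes flagged in pipes_to_unlock_first are released first and
 * their DET buffer updates awaited, so the DET they free is available before
 * the remaining pipes are unlocked.
 */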
1829 void dcn401_interdependent_update_lock(struct dc *dc,
1830 		struct dc_state *context, bool lock)
1831 {
1832 	unsigned int i = 0;
1833 	struct pipe_ctx *pipe = NULL;
1834 	struct timing_generator *tg = NULL;
1835 
1836 	if (lock) {
1837 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
1838 			pipe = &context->res_ctx.pipe_ctx[i];
1839 			tg = pipe->stream_res.tg;
1840 
1841 			if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
1842 					!tg->funcs->is_tg_enabled(tg) ||
1843 					dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
1844 				continue;
1845 			dc->hwss.pipe_control_lock(dc, pipe, true);
1846 		}
1847 	} else {
1848 		/* Free the DET in use first and let the pipe update, then unlock the remaining pipes */
1849 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
1850 			pipe = &context->res_ctx.pipe_ctx[i];
1851 			tg = pipe->stream_res.tg;
1852 
1853 			if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
1854 					!tg->funcs->is_tg_enabled(tg) ||
1855 					dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
1856 				continue;
1857 			}
1858 
1859 			if (dc->scratch.pipes_to_unlock_first[i]) {
1860 				struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1861 				dc->hwss.pipe_control_lock(dc, pipe, false);
1862 				/* Assumes the pipe of the same index in current_state is also an OTG_MASTER pipe */
1863 				dcn401_wait_for_det_buffer_update_under_otg_master(dc, dc->current_state, old_pipe);
1864 			}
1865 		}
1866 
1867 		/* Unlocking the rest of the pipes */
1868 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
1869 			if (dc->scratch.pipes_to_unlock_first[i])
1870 				continue;
1871 
1872 			pipe = &context->res_ctx.pipe_ctx[i];
1873 			tg = pipe->stream_res.tg;
1874 			if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
1875 					!tg->funcs->is_tg_enabled(tg) ||
1876 					dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
1877 				continue;
1878 			}
1879 
1880 			dc->hwss.pipe_control_lock(dc, pipe, false);
1881 		}
1882 	}
1883 }
1884 
1885 void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx)
1886 {
1887 	/* If 3DLUT FL is enabled and 3DLUT is in use, follow the workaround sequence for pipe unlock to make sure that
1888 	 * HUBP will properly fetch 3DLUT contents after unlock.
1889 	 *
1890 	 * This is meant to work around a known HW issue where VREADY will cancel the pending 3DLUT_ENABLE signal regardless
1891 	 * of whether OTG lock is currently being held or not.
1892 	 */
1893 	struct pipe_ctx *wa_pipes[MAX_PIPES] = { NULL };
1894 	struct pipe_ctx *odm_pipe, *mpc_pipe;
1895 	int i, wa_pipe_ct = 0;
1896 
1897 	for (odm_pipe = pipe_ctx; odm_pipe != NULL; odm_pipe = odm_pipe->next_odm_pipe) {
1898 		for (mpc_pipe = odm_pipe; mpc_pipe != NULL; mpc_pipe = mpc_pipe->bottom_pipe) {
1899 			if (mpc_pipe->plane_state && mpc_pipe->plane_state->mcm_luts.lut3d_data.lut3d_src
1900 						== DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM
1901 					&& mpc_pipe->plane_state->mcm_shaper_3dlut_setting
1902 						== DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT) {
1903 				wa_pipes[wa_pipe_ct++] = mpc_pipe;
1904 			}
1905 		}
1906 	}
1907 
1908 	if (wa_pipe_ct > 0) {
1909 		if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
1910 			pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, true);
1911 
1912 		for (i = 0; i < wa_pipe_ct; ++i) {
1913 			if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
1914 				wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
1915 		}
1916 
1917 		pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
1918 		if (pipe_ctx->stream_res.tg->funcs->wait_update_lock_status)
1919 			pipe_ctx->stream_res.tg->funcs->wait_update_lock_status(pipe_ctx->stream_res.tg, false);
1920 
1921 		for (i = 0; i < wa_pipe_ct; ++i) {
1922 			if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
1923 				wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
1924 		}
1925 
1926 		if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
1927 			pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, false);
1928 	} else {
1929 		pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
1930 	}
1931 }
1932 
1933 void dcn401_program_outstanding_updates(struct dc *dc,
1934 		struct dc_state *context)
1935 {
1936 	struct hubbub *hubbub = dc->res_pool->hubbub;
1937 
1938 	/* update compbuf if required */
1939 	if (hubbub->funcs->program_compbuf_segments)
1940 		hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);
1941 }
1942 
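/* Tear down the back end for one pipe: turn off DPMS (or at least audio),
 * release the audio endpoint, and for the top pipe also disable the CRTC,
 * OPTC clock and ODM, clear any DRR adjustment, release the PHY symclk when
 * no longer needed, and point DTBCLK_P back at REFCLK.
 */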
1943 void dcn401_reset_back_end_for_pipe(
1944 		struct dc *dc,
1945 		struct pipe_ctx *pipe_ctx,
1946 		struct dc_state *context)
1947 {
1948 	struct dc_link *link = pipe_ctx->stream->link;
1949 	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
1950 
1951 	DC_LOGGER_INIT(dc->ctx->logger);
1952 	if (pipe_ctx->stream_res.stream_enc == NULL) {
1953 		pipe_ctx->stream = NULL;
1954 		return;
1955 	}
1956 
1957 	/* DPMS may already be disabled, or the dpms_off
1958 	 * status is incorrect due to the fastboot feature.
1959 	 * When the system resumes from S4 with a second
1960 	 * screen only, dpms_off would be true but the VBIOS
1961 	 * has lit up eDP, so check the link status too.
1962 	 */
1963 	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
1964 		dc->link_srv->set_dpms_off(pipe_ctx);
1965 	else if (pipe_ctx->stream_res.audio)
1966 		dc->hwss.disable_audio_stream(pipe_ctx);
1967 
1968 	/* free acquired resources */
1969 	if (pipe_ctx->stream_res.audio) {
1970 		/* disable az_endpoint */
1971 		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
1972 
1973 		/* free audio */
1974 		if (dc->caps.dynamic_audio == true) {
1975 			/* we have to dynamically arbitrate the audio endpoints */
1976 			/* freeing the resource requires resetting is_audio_acquired */
1977 			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
1978 					pipe_ctx->stream_res.audio, false);
1979 			pipe_ctx->stream_res.audio = NULL;
1980 		}
1981 	}
1982 
1983 	/* By the upper caller's loop, the parent pipe (pipe0) will be reset
1984 	 * last. The back end is shared by all pipes and is disabled only
1985 	 * when the parent pipe is disabled.
1986 	 */
1987 	if (pipe_ctx->top_pipe == NULL) {
1988 
1989 		dc->hwss.set_abm_immediate_disable(pipe_ctx);
1990 
1991 		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
1992 
1993 		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
1994 		if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
1995 			pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
1996 					pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
1997 
1998 		set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
1999 
2000 		/* TODO - convert symclk_ref_cnts for otg to a bit map to solve
2001 		 * the case where the same symclk is shared across multiple otg
2002 		 * instances
2003 		 */
2004 		if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
2005 			link->phy_state.symclk_ref_cnts.otg = 0;
2006 		if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
2007 			link_hwss->disable_link_output(link,
2008 					&pipe_ctx->link_res, pipe_ctx->stream->signal);
2009 			link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
2010 		}
2011 
2012 		/* reset DTBCLK_P */
2013 		if (dc->res_pool->dccg->funcs->set_dtbclk_p_src)
2014 			dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, REFCLK, pipe_ctx->stream_res.tg->inst);
2015 	}
2016 
2017 	/*
2018 	 * In case of a dangling plane, setting this to NULL unconditionally
2019 	 * causes failures during reset hw ctx where, if stream is NULL,
2020 	 * it is expected that the pipe_ctx pointers to pipes and plane are NULL.
2021 	 */
2022 	pipe_ctx->stream = NULL;
2023 	pipe_ctx->top_pipe = NULL;
2024 	pipe_ctx->bottom_pipe = NULL;
2025 	pipe_ctx->next_odm_pipe = NULL;
2026 	pipe_ctx->prev_odm_pipe = NULL;
2027 	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
2028 					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
2029 }
2030 
2031 void dcn401_reset_hw_ctx_wrap(
2032 		struct dc *dc,
2033 		struct dc_state *context)
2034 {
2035 	int i;
2036 	struct dce_hwseq *hws = dc->hwseq;
2037 
2038 	/* Reset Back End*/
2039 	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
2040 		struct pipe_ctx *pipe_ctx_old =
2041 			&dc->current_state->res_ctx.pipe_ctx[i];
2042 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2043 
2044 		if (!pipe_ctx_old->stream)
2045 			continue;
2046 
2047 		if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
2048 			continue;
2049 
2050 		if (!pipe_ctx->stream ||
2051 				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
2052 			struct clock_source *old_clk = pipe_ctx_old->clock_source;
2053 
2054 			if (hws->funcs.reset_back_end_for_pipe)
2055 				hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
2056 			if (hws->funcs.enable_stream_gating)
2057 				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
2058 			if (old_clk)
2059 				old_clk->funcs->cs_power_down(old_clk);
2060 		}
2061 	}
2062 }
2063 
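/* All pipes connected to one OTG (MPC slices above/below and ODM slices
 * before/after) must share a single VREADY offset, so return the largest
 * vready_offset_pixels in the group. For example, slices with offsets
 * {24, 48, 32} would all be programmed with 48.
 */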
2064 static unsigned int dcn401_calculate_vready_offset_for_group(struct pipe_ctx *pipe)
2065 {
2066 	struct pipe_ctx *other_pipe;
2067 	unsigned int vready_offset = pipe->global_sync.dcn4x.vready_offset_pixels;
2068 
2069 	/* Always use the largest vready_offset of all connected pipes */
2070 	for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
2071 		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
2072 			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
2073 	}
2074 	for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
2075 		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
2076 			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
2077 	}
2078 	for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
2079 		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
2080 			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
2081 	}
2082 	for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
2083 		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
2084 			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
2085 	}
2086 
2087 	return vready_offset;
2088 }
2089 
2090 static void dcn401_program_tg(
2091 	struct dc *dc,
2092 	struct pipe_ctx *pipe_ctx,
2093 	struct dc_state *context,
2094 	struct dce_hwseq *hws)
2095 {
2096 	pipe_ctx->stream_res.tg->funcs->program_global_sync(
2097 		pipe_ctx->stream_res.tg,
2098 		dcn401_calculate_vready_offset_for_group(pipe_ctx),
2099 		(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
2100 		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
2101 		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
2102 		(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
2103 
2104 	if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
2105 		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
2106 
2107 	pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2108 		pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
2109 
2110 	if (hws->funcs.setup_vupdate_interrupt)
2111 		hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2112 }
2113 
2114 void dcn401_program_pipe(
2115 	struct dc *dc,
2116 	struct pipe_ctx *pipe_ctx,
2117 	struct dc_state *context)
2118 {
2119 	struct dce_hwseq *hws = dc->hwseq;
2120 
2121 	/* Only need to unblank on top pipe */
2122 	if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
2123 		if (pipe_ctx->update_flags.bits.enable ||
2124 			pipe_ctx->update_flags.bits.odm ||
2125 			pipe_ctx->stream->update_flags.bits.abm_level)
2126 			hws->funcs.blank_pixel_data(dc, pipe_ctx,
2127 				!pipe_ctx->plane_state ||
2128 				!pipe_ctx->plane_state->visible);
2129 	}
2130 
2131 	/* Only update TG on top pipe */
2132 	if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
2133 		&& !pipe_ctx->prev_odm_pipe)
2134 		dcn401_program_tg(dc, pipe_ctx, context, hws);
2135 
2136 	if (pipe_ctx->update_flags.bits.odm)
2137 		hws->funcs.update_odm(dc, context, pipe_ctx);
2138 
2139 	if (pipe_ctx->update_flags.bits.enable) {
2140 		if (hws->funcs.enable_plane)
2141 			hws->funcs.enable_plane(dc, pipe_ctx, context);
2142 		else
2143 			dc->hwss.enable_plane(dc, pipe_ctx, context);
2144 
2145 		if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes)
2146 			dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
2147 	}
2148 
2149 	if (pipe_ctx->update_flags.bits.det_size) {
2150 		if (dc->res_pool->hubbub->funcs->program_det_size)
2151 			dc->res_pool->hubbub->funcs->program_det_size(
2152 				dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
2153 		if (dc->res_pool->hubbub->funcs->program_det_segments)
2154 			dc->res_pool->hubbub->funcs->program_det_segments(
2155 				dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
2156 	}
2157 
2158 	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
2159 	    pipe_ctx->plane_state->update_flags.raw ||
2160 	    pipe_ctx->stream->update_flags.raw))
2161 		dc->hwss.update_dchubp_dpp(dc, pipe_ctx, context);
2162 
2163 	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
2164 		pipe_ctx->plane_state->update_flags.bits.hdr_mult))
2165 		hws->funcs.set_hdr_multiplier(pipe_ctx);
2166 
2167 	if (pipe_ctx->plane_state &&
2168 		(pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2169 			pipe_ctx->plane_state->update_flags.bits.gamma_change ||
2170 			pipe_ctx->plane_state->update_flags.bits.lut_3d ||
2171 			pipe_ctx->update_flags.bits.enable))
2172 		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
2173 
2174 	/* dcn10_translate_regamma_to_hw_format takes 750us to finish, so
2175 	 * only do gamma programming when powering on; an internal memcmp
2176 	 * avoids updating on slave planes.
2177 	 */
2178 	if (pipe_ctx->update_flags.bits.enable ||
2179 	    pipe_ctx->update_flags.bits.plane_changed ||
2180 	    pipe_ctx->stream->update_flags.bits.out_tf)
2181 		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
2182 
2183 	/* If the pipe has been enabled or has a different opp, we
2184 	 * should reprogram the fmt. This deals with cases where
2185 	 * interaction between mpc and odm combine on different streams
2186 	 * causes a different pipe to be chosen to odm combine with.
2187 	 */
2188 	if (pipe_ctx->update_flags.bits.enable
2189 		|| pipe_ctx->update_flags.bits.opp_changed) {
2190 
2191 		pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
2192 			pipe_ctx->stream_res.opp,
2193 			COLOR_SPACE_YCBCR601,
2194 			pipe_ctx->stream->timing.display_color_depth,
2195 			pipe_ctx->stream->signal);
2196 
2197 		pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
2198 			pipe_ctx->stream_res.opp,
2199 			&pipe_ctx->stream->bit_depth_params,
2200 			&pipe_ctx->stream->clamping);
2201 	}
2202 
2203 	/* Set ABM pipe after other pipe configurations done */
2204 	if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
2205 		if (pipe_ctx->stream_res.abm) {
2206 			dc->hwss.set_pipe(pipe_ctx);
2207 			pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm,
2208 				pipe_ctx->stream->abm_level);
2209 		}
2210 	}
2211 
2212 	if (pipe_ctx->update_flags.bits.test_pattern_changed) {
2213 		struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp;
2214 		struct bit_depth_reduction_params params;
2215 
2216 		memset(&params, 0, sizeof(params));
2217 		odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
2218 		dc->hwss.set_disp_pattern_generator(dc,
2219 			pipe_ctx,
2220 			pipe_ctx->stream_res.test_pattern_params.test_pattern,
2221 			pipe_ctx->stream_res.test_pattern_params.color_space,
2222 			pipe_ctx->stream_res.test_pattern_params.color_depth,
2223 			NULL,
2224 			pipe_ctx->stream_res.test_pattern_params.width,
2225 			pipe_ctx->stream_res.test_pattern_params.height,
2226 			pipe_ctx->stream_res.test_pattern_params.offset);
2227 	}
2228 }
2229 
2230 /*
2231  * dcn401_program_pipe_sequence - Sequence-based version of dcn401_program_pipe
2232  *
2233  * Instead of directly calling hardware programming functions, this variant
2234  * appends sequence steps to the provided block_sequence array, which can
2235  * later be executed as part of hwss_execute_sequence.
2236  */
2239 void dcn401_program_pipe_sequence(
2240 	struct dc *dc,
2241 	struct pipe_ctx *pipe_ctx,
2242 	struct dc_state *context,
2243 	struct block_sequence_state *seq_state)
2244 {
2245 	struct dce_hwseq *hws = dc->hwseq;
2246 
2247 	/* Only need to unblank on top pipe */
2248 	if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
2249 		if (pipe_ctx->update_flags.bits.enable ||
2250 				pipe_ctx->update_flags.bits.odm ||
2251 				pipe_ctx->stream->update_flags.bits.abm_level) {
2252 			if (dc->hwseq->funcs.blank_pixel_data_sequence)
2253 				dc->hwseq->funcs.blank_pixel_data_sequence(dc, pipe_ctx,
2254 					 !pipe_ctx->plane_state || !pipe_ctx->plane_state->visible,
2255 					 seq_state);
2256 		}
2257 	}
2258 
2259 	/* Only update TG on top pipe */
2260 	if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
2261 		&& !pipe_ctx->prev_odm_pipe) {
2262 
2263 		/* Step 1: Program global sync */
2264 		hwss_add_tg_program_global_sync(seq_state, pipe_ctx->stream_res.tg,
2265 			dcn401_calculate_vready_offset_for_group(pipe_ctx),
2266 			(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
2267 			(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
2268 			(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
2269 			(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
2270 
2271 		/* Step 2: Wait for VACTIVE state (if not phantom pipe) */
2272 		if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
2273 			hwss_add_tg_wait_for_state(seq_state, pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
2274 
2275 		/* Step 3: Set VTG params */
2276 		hwss_add_tg_set_vtg_params(seq_state, pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
2277 
2278 		/* Step 4: Setup vupdate interrupt (if available) */
2279 		if (hws->funcs.setup_vupdate_interrupt)
2280 			dcn401_setup_vupdate_interrupt_sequence(dc, pipe_ctx, seq_state);
2281 	}
2282 
2283 	if (pipe_ctx->update_flags.bits.odm) {
2284 		if (hws->funcs.update_odm_sequence)
2285 			hws->funcs.update_odm_sequence(dc, context, pipe_ctx, seq_state);
2286 	}
2287 
2288 	if (pipe_ctx->update_flags.bits.enable) {
2289 		if (dc->hwss.enable_plane_sequence)
2290 			dc->hwss.enable_plane_sequence(dc, pipe_ctx, context, seq_state);
2291 	}
2292 
2293 	if (pipe_ctx->update_flags.bits.det_size) {
2294 		if (dc->res_pool->hubbub->funcs->program_det_size) {
2295 			hwss_add_hubp_program_det_size(seq_state, dc->res_pool->hubbub,
2296 				pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
2297 		}
2298 
2299 		if (dc->res_pool->hubbub->funcs->program_det_segments) {
2300 			hwss_add_hubp_program_det_segments(seq_state, dc->res_pool->hubbub,
2301 				pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
2302 		}
2303 	}
2304 
2305 	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
2306 	    pipe_ctx->plane_state->update_flags.raw ||
2307 	    pipe_ctx->stream->update_flags.raw)) {
2308 
2309 		if (dc->hwss.update_dchubp_dpp_sequence)
2310 			dc->hwss.update_dchubp_dpp_sequence(dc, pipe_ctx, context, seq_state);
2311 	}
2312 
2313 	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
2314 		pipe_ctx->plane_state->update_flags.bits.hdr_mult)) {
2315 
2316 		hws->funcs.set_hdr_multiplier_sequence(pipe_ctx, seq_state);
2317 	}
2318 
2319 	if (pipe_ctx->plane_state &&
2320 		(pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2321 			pipe_ctx->plane_state->update_flags.bits.gamma_change ||
2322 			pipe_ctx->plane_state->update_flags.bits.lut_3d ||
2323 			pipe_ctx->update_flags.bits.enable)) {
2324 
2325 		hwss_add_dpp_set_input_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->plane_state);
2326 	}
2327 
2328 	/* dcn10_translate_regamma_to_hw_format takes 750us to finish, so
2329 	 * only do gamma programming when powering on; an internal memcmp
2330 	 * avoids updating on slave planes.
2331 	 */
2332 	if (pipe_ctx->update_flags.bits.enable ||
2333 			pipe_ctx->update_flags.bits.plane_changed ||
2334 			pipe_ctx->stream->update_flags.bits.out_tf) {
2335 		hwss_add_dpp_set_output_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->stream);
2336 	}
2337 
2338 	/* If the pipe has been enabled or has a different opp, we
2339 	 * should reprogram the fmt. This deals with cases where
2340 	 * interaction between mpc and odm combine on different streams
2341 	 * causes a different pipe to be chosen to odm combine with.
2342 	 */
2343 	if (pipe_ctx->update_flags.bits.enable
2344 		|| pipe_ctx->update_flags.bits.opp_changed) {
2345 
2346 		hwss_add_opp_set_dyn_expansion(seq_state, pipe_ctx->stream_res.opp, COLOR_SPACE_YCBCR601,
2347 			pipe_ctx->stream->timing.display_color_depth, pipe_ctx->stream->signal);
2348 
2349 		hwss_add_opp_program_fmt(seq_state, pipe_ctx->stream_res.opp,
2350 			&pipe_ctx->stream->bit_depth_params, &pipe_ctx->stream->clamping);
2351 	}
2352 
2353 	/* Set ABM pipe after other pipe configurations done */
2354 	if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
2355 		if (pipe_ctx->stream_res.abm) {
2356 			hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx);
2357 
2358 			hwss_add_abm_set_level(seq_state, pipe_ctx->stream_res.abm, pipe_ctx->stream->abm_level);
2359 		}
2360 	}
2361 
2362 	if (pipe_ctx->update_flags.bits.test_pattern_changed) {
2363 		struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp;
2364 
2365 		hwss_add_opp_program_bit_depth_reduction(seq_state, odm_opp, true, pipe_ctx);
2366 
2367 		hwss_add_opp_set_disp_pattern_generator(seq_state,
2368 			odm_opp,
2369 			pipe_ctx->stream_res.test_pattern_params.test_pattern,
2370 			pipe_ctx->stream_res.test_pattern_params.color_space,
2371 			pipe_ctx->stream_res.test_pattern_params.color_depth,
2372 			(struct tg_color){0},
2373 			false,
2374 			pipe_ctx->stream_res.test_pattern_params.width,
2375 			pipe_ctx->stream_res.test_pattern_params.height,
2376 			pipe_ctx->stream_res.test_pattern_params.offset);
2377 	}
2378 
2379 }
2380 
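/* Full front-end programming for a new state: log topology changes, turn
 * off triple buffering for full updates, detect per-pipe changes, blank and
 * disconnect outgoing pipes (freeing phantom-pipe DET early), update ODM on
 * blanked OTG masters, then program every updated pipe from the top pipe
 * down so MPCC setup happens in blending order.
 */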
2381 void dcn401_program_front_end_for_ctx(
2382 	struct dc *dc,
2383 	struct dc_state *context)
2384 {
2385 	int i;
2386 	unsigned int prev_hubp_count = 0;
2387 	unsigned int hubp_count = 0;
2388 	struct dce_hwseq *hws = dc->hwseq;
2389 	struct pipe_ctx *pipe = NULL;
2390 
2391 	DC_LOGGER_INIT(dc->ctx->logger);
2392 
2393 	if (resource_is_pipe_topology_changed(dc->current_state, context))
2394 		resource_log_pipe_topology_update(dc, context);
2395 
2396 	if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
2397 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
2398 			pipe = &context->res_ctx.pipe_ctx[i];
2399 
2400 			if (pipe->plane_state) {
2401 				if (pipe->plane_state->triplebuffer_flips)
2402 					BREAK_TO_DEBUGGER();
2403 
2404 				/* turn off triple buffer for full update */
2405 				dc->hwss.program_triplebuffer(
2406 					dc, pipe, pipe->plane_state->triplebuffer_flips);
2407 			}
2408 		}
2409 	}
2410 
2411 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2412 		if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
2413 			prev_hubp_count++;
2414 		if (context->res_ctx.pipe_ctx[i].plane_state)
2415 			hubp_count++;
2416 	}
2417 
2418 	if (prev_hubp_count == 0 && hubp_count > 0) {
2419 		if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
2420 			dc->res_pool->hubbub->funcs->force_pstate_change_control(
2421 				dc->res_pool->hubbub, true, false);
2422 		udelay(500);
2423 	}
2424 
2425 	/* Set pipe update flags and lock pipes */
2426 	for (i = 0; i < dc->res_pool->pipe_count; i++)
2427 		dc->hwss.detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i],
2428 			&context->res_ctx.pipe_ctx[i]);
2429 
2430 	/* When disabling phantom pipes, turn on phantom OTG first (so we can get double
2431 	 * buffer updates properly)
2432 	 */
2433 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2434 		struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;
2435 
2436 		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
2437 
2438 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
2439 			dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
2440 			struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
2441 
2442 			if (tg->funcs->enable_crtc) {
2443 				if (dc->hwseq->funcs.blank_pixel_data)
2444 					dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);
2445 
2446 				tg->funcs->enable_crtc(tg);
2447 			}
2448 		}
2449 	}
2450 	/* OTG blank before disabling all front ends */
2451 	for (i = 0; i < dc->res_pool->pipe_count; i++)
2452 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
2453 			&& !context->res_ctx.pipe_ctx[i].top_pipe
2454 			&& !context->res_ctx.pipe_ctx[i].prev_odm_pipe
2455 			&& context->res_ctx.pipe_ctx[i].stream)
2456 			hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
2457 
2458 	/* Disconnect mpcc */
2459 	for (i = 0; i < dc->res_pool->pipe_count; i++)
2460 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
2461 			|| context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
2462 			struct hubbub *hubbub = dc->res_pool->hubbub;
2463 
2464 			/* Phantom pipe DET should be 0, but if a pipe in use is being transitioned to phantom
2465 			 * then we want to do the programming here (effectively it's being disabled). If we do
2466 			 * the programming later the DET won't be updated until the OTG for the phantom pipe is
2467 			 * turned on (i.e. in an MCLK switch) which can come in too late and cause issues with
2468 			 * DET allocation.
2469 			 */
2470 			if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
2471 				(context->res_ctx.pipe_ctx[i].plane_state &&
2472 				dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) ==
2473 				SUBVP_PHANTOM))) {
2474 				if (hubbub->funcs->program_det_size)
2475 					hubbub->funcs->program_det_size(hubbub,
2476 						dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
2477 				if (dc->res_pool->hubbub->funcs->program_det_segments)
2478 					dc->res_pool->hubbub->funcs->program_det_segments(
2479 						hubbub,	dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
2480 			}
2481 			hws->funcs.plane_atomic_disconnect(dc, dc->current_state,
2482 				&dc->current_state->res_ctx.pipe_ctx[i]);
2483 			DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
2484 		}
2485 
2486 	/* update ODM for blanked OTG master pipes */
2487 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2488 		pipe = &context->res_ctx.pipe_ctx[i];
2489 		if (resource_is_pipe_type(pipe, OTG_MASTER) &&
2490 			!resource_is_pipe_type(pipe, DPP_PIPE) &&
2491 			pipe->update_flags.bits.odm &&
2492 			hws->funcs.update_odm)
2493 			hws->funcs.update_odm(dc, context, pipe);
2494 	}
2495 
2496 	/*
2497 	 * Program all updated pipes, order matters for mpcc setup. Start with
2498 	 * top pipe and program all pipes that follow in order
2499 	 */
2500 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2501 		pipe = &context->res_ctx.pipe_ctx[i];
2502 
2503 		if (pipe->plane_state && !pipe->top_pipe) {
2504 			while (pipe) {
2505 				if (hws->funcs.program_pipe)
2506 					hws->funcs.program_pipe(dc, pipe, context);
2507 				else {
2508 					/* Don't program phantom pipes in the regular front end programming sequence.
2509 					 * There is an MPO transition case where a pipe being used by a video plane is
2510 					 * transitioned directly to be a phantom pipe when closing the MPO video.
2511 					 * However the phantom pipe will program a new HUBP_VTG_SEL (update takes place
2512 					 * right away) but the MPO still exists until the double buffered update of the
2513 					 * main pipe so we will get a frame of underflow if the phantom pipe is
2514 					 * programmed here.
2515 					 */
2516 					if (pipe->stream &&
2517 						dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
2518 						dcn401_program_pipe(dc, pipe, context);
2519 				}
2520 
2521 				pipe = pipe->bottom_pipe;
2522 			}
2523 		}
2524 
2525 		/* Program secondary blending tree and writeback pipes */
2526 		pipe = &context->res_ctx.pipe_ctx[i];
2527 		if (!pipe->top_pipe && !pipe->prev_odm_pipe
2528 			&& pipe->stream && pipe->stream->num_wb_info > 0
2529 			&& (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
2530 				|| pipe->stream->update_flags.raw)
2531 			&& hws->funcs.program_all_writeback_pipes_in_tree)
2532 			hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
2533 
2534 		/* Avoid underflow by checking the pipe line read when adding a 2nd plane. */
2535 		if (hws->wa.wait_hubpret_read_start_during_mpo_transition &&
2536 				!pipe->top_pipe &&
2537 				pipe->stream &&
2538 				pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
2539 				dc->current_state->stream_status[0].plane_count == 1 &&
2540 				context->stream_status[0].plane_count > 1) {
2541 			pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
2542 		}
2543 	}
2544 }
2545 
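/* Runs after the pipe locks are released: resets OPPs and planes that went
 * away, polls flip-pending and OPTC double-buffer-pending so enables and
 * ODM growth fully latch, programs phantom pipes outside the regular
 * sequence, then applies force-P-State, MALL and stutter workarounds.
 */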
2546 void dcn401_post_unlock_program_front_end(
2547 	struct dc *dc,
2548 	struct dc_state *context)
2549 {
2550 	// Timeout for pipe enable
2551 	unsigned int timeout_us = 100000;
2552 	unsigned int polling_interval_us = 1;
2553 	struct dce_hwseq *hwseq = dc->hwseq;
2554 	int i;
2555 
2556 	DC_LOGGER_INIT(dc->ctx->logger);
2557 
2558 	for (i = 0; i < dc->res_pool->pipe_count; i++)
2559 		if (resource_is_pipe_type(&dc->current_state->res_ctx.pipe_ctx[i], OPP_HEAD) &&
2560 			!resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD))
2561 			dc->hwss.post_unlock_reset_opp(dc,
2562 				&dc->current_state->res_ctx.pipe_ctx[i]);
2563 
2564 	for (i = 0; i < dc->res_pool->pipe_count; i++)
2565 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
2566 			dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
2567 
2568 	/*
2569 	 * If we are enabling a pipe, we need to wait for pending clear as this is a critical
2570 	 * part of the enable operation otherwise, DM may request an immediate flip which
2571 	 * will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which
2572 	 * is unsupported on DCN.
2573 	 */
2574 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2575 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2576 		// Don't check flip pending on phantom pipes
2577 		if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
2578 			dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
2579 			struct hubp *hubp = pipe->plane_res.hubp;
2580 			int j = 0;
2581 
2582 			for (j = 0; j < timeout_us / polling_interval_us
2583 				&& hubp->funcs->hubp_is_flip_pending(hubp); j++)
2584 				udelay(polling_interval_us);
2585 		}
2586 	}
2587 
2588 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2589 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2590 		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
2591 
2592 		/* When going from a smaller ODM slice count to larger, we must ensure double
2593 		 * buffer update completes before we return to ensure we don't reduce DISPCLK
2594 		 * before we've transitioned to 2:1 or 4:1
2595 		 */
2596 		if (resource_is_pipe_type(old_pipe, OTG_MASTER) && resource_is_pipe_type(pipe, OTG_MASTER) &&
2597 			resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) &&
2598 			dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
2599 			int j = 0;
2600 			struct timing_generator *tg = pipe->stream_res.tg;
2601 
2602 			if (tg->funcs->get_optc_double_buffer_pending) {
2603 				for (j = 0; j < timeout_us / polling_interval_us
2604 					&& tg->funcs->get_optc_double_buffer_pending(tg); j++)
2605 					udelay(polling_interval_us);
2606 			}
2607 		}
2608 	}
2609 
2610 	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
2611 		dc->res_pool->hubbub->funcs->force_pstate_change_control(
2612 			dc->res_pool->hubbub, false, false);
2613 
2615 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2616 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2617 
2618 		if (pipe->plane_state && !pipe->top_pipe) {
2619 			/* Program phantom pipe here to prevent a frame of underflow in the MPO transition
2620 			 * case (if a pipe being used for a video plane transitions to a phantom pipe, it
2621 			 * can underflow due to HUBP_VTG_SEL programming if done in the regular front end
2622 			 * programming sequence).
2623 			 */
2624 			while (pipe) {
2625 				if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
2626 					/* When turning on the phantom pipe we want to run through the
2627 					 * entire enable sequence, so apply all the "enable" flags.
2628 					 */
2629 					if (dc->hwss.apply_update_flags_for_phantom)
2630 						dc->hwss.apply_update_flags_for_phantom(pipe);
2631 					if (dc->hwss.update_phantom_vp_position)
2632 						dc->hwss.update_phantom_vp_position(dc, context, pipe);
2633 					dcn401_program_pipe(dc, pipe, context);
2634 				}
2635 				pipe = pipe->bottom_pipe;
2636 			}
2637 		}
2638 	}
2639 
2640 	if (!hwseq)
2641 		return;
2642 
2643 	/* P-State support transitions:
2644 	 * Natural -> FPO:      P-State disabled in prepare, force disallow anytime is safe
2645 	 * FPO -> Natural:      Unforce anytime after FW disable is safe (P-State will assert naturally)
2646 	 * Unsupported -> FPO:  P-State enabled in optimize, force disallow anytime is safe
2647 	 * FPO -> Unsupported:  P-State disabled in prepare, unforce disallow anytime is safe
2648 	 * FPO <-> SubVP:       Force disallow is maintained on the FPO / SubVP pipes
2649 	 */
2650 	if (hwseq->funcs.update_force_pstate)
2651 		dc->hwseq->funcs.update_force_pstate(dc, context);
2652 	/* Only program the MALL registers after all the main and phantom pipes
2653 	 * are done programming.
2654 	 */
2655 	if (hwseq->funcs.program_mall_pipe_config)
2656 		hwseq->funcs.program_mall_pipe_config(dc, context);
2657 
2658 	/* WA to apply WM setting */
2659 	if (hwseq->wa.DEGVIDCN21)
2660 		dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
2661 
2663 	/* WA for stutter underflow during MPO transitions when adding 2nd plane */
2664 	if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {
2665 
2666 		if (dc->current_state->stream_status[0].plane_count == 1 &&
2667 			context->stream_status[0].plane_count > 1) {
2668 
2669 			struct timing_generator *tg = dc->res_pool->timing_generators[0];
2670 
2671 			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false);
2672 
2673 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true;
2674 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame =
2675 				tg->funcs->get_frame_count(tg);
2676 		}
2677 	}
2678 }
2679 
2680 bool dcn401_update_bandwidth(
2681 	struct dc *dc,
2682 	struct dc_state *context)
2683 {
2684 	int i;
2685 	struct dce_hwseq *hws = dc->hwseq;
2686 
2687 	/* recalculate DML parameters */
2688 	if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK)
2689 		return false;
2690 
2691 	/* apply updated bandwidth parameters */
2692 	dc->hwss.prepare_bandwidth(dc, context);
2693 
2694 	/* update hubp configs for all pipes */
2695 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2696 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2697 
2698 		if (pipe_ctx->plane_state == NULL)
2699 			continue;
2700 
2701 		if (pipe_ctx->top_pipe == NULL) {
2702 			bool blank = !is_pipe_tree_visible(pipe_ctx);
2703 
2704 			pipe_ctx->stream_res.tg->funcs->program_global_sync(
2705 				pipe_ctx->stream_res.tg,
2706 				dcn401_calculate_vready_offset_for_group(pipe_ctx),
2707 				(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
2708 				(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
2709 				(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
2710 				(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
2711 
2712 			pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2713 				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false);
2714 
2715 			if (pipe_ctx->prev_odm_pipe == NULL)
2716 				hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
2717 
2718 			if (hws->funcs.setup_vupdate_interrupt)
2719 				hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2720 		}
2721 
2722 		if (pipe_ctx->plane_res.hubp->funcs->hubp_setup2)
2723 			pipe_ctx->plane_res.hubp->funcs->hubp_setup2(
2724 				pipe_ctx->plane_res.hubp,
2725 				&pipe_ctx->hubp_regs,
2726 				&pipe_ctx->global_sync,
2727 				&pipe_ctx->stream->timing);
2728 	}
2729 
2730 	return true;
2731 }
2732 
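/* Compare a pipe's old and new context and set new_pipe->update_flags so
 * later programming only touches the blocks that actually changed. Phantom
 * (SubVP) transitions are special-cased: real-to-phantom is treated as a
 * full disable, and phantom pipes are always flagged for re-enable.
 */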
2733 void dcn401_detect_pipe_changes(struct dc_state *old_state,
2734 	struct dc_state *new_state,
2735 	struct pipe_ctx *old_pipe,
2736 	struct pipe_ctx *new_pipe)
2737 {
2738 	bool old_is_phantom = dc_state_get_pipe_subvp_type(old_state, old_pipe) == SUBVP_PHANTOM;
2739 	bool new_is_phantom = dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM;
2740 
2741 	unsigned int old_pipe_vready_offset_pixels = old_pipe->global_sync.dcn4x.vready_offset_pixels;
2742 	unsigned int new_pipe_vready_offset_pixels = new_pipe->global_sync.dcn4x.vready_offset_pixels;
2743 	unsigned int old_pipe_vstartup_lines = old_pipe->global_sync.dcn4x.vstartup_lines;
2744 	unsigned int new_pipe_vstartup_lines = new_pipe->global_sync.dcn4x.vstartup_lines;
2745 	unsigned int old_pipe_vupdate_offset_pixels = old_pipe->global_sync.dcn4x.vupdate_offset_pixels;
2746 	unsigned int new_pipe_vupdate_offset_pixels = new_pipe->global_sync.dcn4x.vupdate_offset_pixels;
2747 	unsigned int old_pipe_vupdate_width_pixels = old_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;
2748 	unsigned int new_pipe_vupdate_width_pixels = new_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;
2749 
2750 	new_pipe->update_flags.raw = 0;
2751 
2752 	/* If non-phantom pipe is being transitioned to a phantom pipe,
2753 	 * set disable and return immediately. This is because the pipe
2754 	 * that was previously in use must be fully disabled before we
2755 	 * can "enable" it as a phantom pipe (since the OTG will certainly
2756 	 * be different). The post_unlock sequence will set the correct
2757 	 * update flags to enable the phantom pipe.
2758 	 */
2759 	if (old_pipe->plane_state && !old_is_phantom &&
2760 		new_pipe->plane_state && new_is_phantom) {
2761 		new_pipe->update_flags.bits.disable = 1;
2762 		return;
2763 	}
2764 
2765 	if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
2766 		resource_is_odm_topology_changed(new_pipe, old_pipe))
2767 		/* Detect odm changes */
2768 		new_pipe->update_flags.bits.odm = 1;
2769 
2770 	/* Exit on unchanged, unused pipe */
2771 	if (!old_pipe->plane_state && !new_pipe->plane_state)
2772 		return;
2773 	/* Detect pipe enable/disable */
2774 	if (!old_pipe->plane_state && new_pipe->plane_state) {
2775 		new_pipe->update_flags.bits.enable = 1;
2776 		new_pipe->update_flags.bits.mpcc = 1;
2777 		new_pipe->update_flags.bits.dppclk = 1;
2778 		new_pipe->update_flags.bits.hubp_interdependent = 1;
2779 		new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
2780 		new_pipe->update_flags.bits.unbounded_req = 1;
2781 		new_pipe->update_flags.bits.gamut_remap = 1;
2782 		new_pipe->update_flags.bits.scaler = 1;
2783 		new_pipe->update_flags.bits.viewport = 1;
2784 		new_pipe->update_flags.bits.det_size = 1;
2785 		if (new_pipe->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE &&
2786 			new_pipe->stream_res.test_pattern_params.width != 0 &&
2787 			new_pipe->stream_res.test_pattern_params.height != 0)
2788 			new_pipe->update_flags.bits.test_pattern_changed = 1;
2789 		if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
2790 			new_pipe->update_flags.bits.odm = 1;
2791 			new_pipe->update_flags.bits.global_sync = 1;
2792 		}
2793 		return;
2794 	}
2795 
2796 	/* For SubVP we need to unconditionally enable because any phantom pipes are
2797 	 * always removed and then newly added on every full update whenever SubVP is in use.
2798 	 * The remove-add sequence of the phantom pipe always results in the pipe
2799 	 * being blanked in enable_stream_timing (DPG).
2800 	 */
2801 	if (new_pipe->stream && dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM)
2802 		new_pipe->update_flags.bits.enable = 1;
2803 
2804 	/* Phantom pipes are effectively disabled, so if the pipe was previously a
2805 	 * phantom we have to enable it
2806 	 */
2807 	if (old_pipe->plane_state && old_is_phantom &&
2808 		new_pipe->plane_state && !new_is_phantom)
2809 		new_pipe->update_flags.bits.enable = 1;
2810 
2811 	if (old_pipe->plane_state && !new_pipe->plane_state) {
2812 		new_pipe->update_flags.bits.disable = 1;
2813 		return;
2814 	}
2815 
2816 	/* Detect plane change */
2817 	if (old_pipe->plane_state != new_pipe->plane_state)
2818 		new_pipe->update_flags.bits.plane_changed = true;
2819 
2820 	/* Detect top pipe only changes */
2821 	if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
2822 		/* Detect global sync changes */
2823 		if ((old_pipe_vready_offset_pixels != new_pipe_vready_offset_pixels)
2824 			|| (old_pipe_vstartup_lines != new_pipe_vstartup_lines)
2825 			|| (old_pipe_vupdate_offset_pixels != new_pipe_vupdate_offset_pixels)
2826 			|| (old_pipe_vupdate_width_pixels != new_pipe_vupdate_width_pixels))
2827 			new_pipe->update_flags.bits.global_sync = 1;
2828 	}
2829 
2830 	if (old_pipe->det_buffer_size_kb != new_pipe->det_buffer_size_kb)
2831 		new_pipe->update_flags.bits.det_size = 1;
2832 
2833 	/*
2834 	 * Detect opp / tg change, only set on change, not on enable
2835 	 * Assume mpcc inst = pipe index, if not this code needs to be updated
2836 	 * since mpcc is what is affected by these. In fact the whole sequence
2837 	 * makes this assumption at the moment, with hubp reset matched to the
2838 	 * same-index mpcc reset.
2839 	 */
2840 	if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
2841 		new_pipe->update_flags.bits.opp_changed = 1;
2842 	if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
2843 		new_pipe->update_flags.bits.tg_changed = 1;
2844 
2845 	/*
2846 	 * Detect mpcc blending changes; only the dpp inst and opp matter here,
2847 	 * since mpccs that get removed/inserted update the connected ones during
2848 	 * their own programming
2849 	 */
2850 	if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
2851 		|| old_pipe->stream_res.opp != new_pipe->stream_res.opp)
2852 		new_pipe->update_flags.bits.mpcc = 1;
2853 
2854 	/* Detect dppclk change */
2855 	if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
2856 		new_pipe->update_flags.bits.dppclk = 1;
2857 
2858 	/* Check for scl update */
2859 	if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
2860 		new_pipe->update_flags.bits.scaler = 1;
2861 	/* Check for vp update */
2862 	if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
2863 		|| memcmp(&old_pipe->plane_res.scl_data.viewport_c,
2864 			&new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
2865 		new_pipe->update_flags.bits.viewport = 1;
2866 
2867 	/* Detect dlg/ttu/rq updates */
2868 	{
2869 		struct dml2_display_dlg_regs old_dlg_regs = old_pipe->hubp_regs.dlg_regs;
2870 		struct dml2_display_ttu_regs old_ttu_regs = old_pipe->hubp_regs.ttu_regs;
2871 		struct dml2_display_rq_regs	 old_rq_regs = old_pipe->hubp_regs.rq_regs;
2872 		struct dml2_display_dlg_regs *new_dlg_regs = &new_pipe->hubp_regs.dlg_regs;
2873 		struct dml2_display_ttu_regs *new_ttu_regs = &new_pipe->hubp_regs.ttu_regs;
2874 		struct dml2_display_rq_regs	 *new_rq_regs = &new_pipe->hubp_regs.rq_regs;
2875 
2876 		/* Detect pipe interdependent updates */
2877 		if ((old_dlg_regs.dst_y_prefetch != new_dlg_regs->dst_y_prefetch)
2878 			|| (old_dlg_regs.vratio_prefetch != new_dlg_regs->vratio_prefetch)
2879 			|| (old_dlg_regs.vratio_prefetch_c != new_dlg_regs->vratio_prefetch_c)
2880 			|| (old_dlg_regs.dst_y_per_vm_vblank != new_dlg_regs->dst_y_per_vm_vblank)
2881 			|| (old_dlg_regs.dst_y_per_row_vblank != new_dlg_regs->dst_y_per_row_vblank)
2882 			|| (old_dlg_regs.dst_y_per_vm_flip != new_dlg_regs->dst_y_per_vm_flip)
2883 			|| (old_dlg_regs.dst_y_per_row_flip != new_dlg_regs->dst_y_per_row_flip)
2884 			|| (old_dlg_regs.refcyc_per_meta_chunk_vblank_l != new_dlg_regs->refcyc_per_meta_chunk_vblank_l)
2885 			|| (old_dlg_regs.refcyc_per_meta_chunk_vblank_c != new_dlg_regs->refcyc_per_meta_chunk_vblank_c)
2886 			|| (old_dlg_regs.refcyc_per_meta_chunk_flip_l != new_dlg_regs->refcyc_per_meta_chunk_flip_l)
2887 			|| (old_dlg_regs.refcyc_per_line_delivery_pre_l != new_dlg_regs->refcyc_per_line_delivery_pre_l)
2888 			|| (old_dlg_regs.refcyc_per_line_delivery_pre_c != new_dlg_regs->refcyc_per_line_delivery_pre_c)
2889 			|| (old_ttu_regs.refcyc_per_req_delivery_pre_l != new_ttu_regs->refcyc_per_req_delivery_pre_l)
2890 			|| (old_ttu_regs.refcyc_per_req_delivery_pre_c != new_ttu_regs->refcyc_per_req_delivery_pre_c)
2891 			|| (old_ttu_regs.refcyc_per_req_delivery_pre_cur0 !=
2892 				new_ttu_regs->refcyc_per_req_delivery_pre_cur0)
2893 			|| (old_ttu_regs.min_ttu_vblank != new_ttu_regs->min_ttu_vblank)
2894 			|| (old_ttu_regs.qos_level_flip != new_ttu_regs->qos_level_flip)) {
2895 			old_dlg_regs.dst_y_prefetch = new_dlg_regs->dst_y_prefetch;
2896 			old_dlg_regs.vratio_prefetch = new_dlg_regs->vratio_prefetch;
2897 			old_dlg_regs.vratio_prefetch_c = new_dlg_regs->vratio_prefetch_c;
2898 			old_dlg_regs.dst_y_per_vm_vblank = new_dlg_regs->dst_y_per_vm_vblank;
2899 			old_dlg_regs.dst_y_per_row_vblank = new_dlg_regs->dst_y_per_row_vblank;
2900 			old_dlg_regs.dst_y_per_vm_flip = new_dlg_regs->dst_y_per_vm_flip;
2901 			old_dlg_regs.dst_y_per_row_flip = new_dlg_regs->dst_y_per_row_flip;
2902 			old_dlg_regs.refcyc_per_meta_chunk_vblank_l = new_dlg_regs->refcyc_per_meta_chunk_vblank_l;
2903 			old_dlg_regs.refcyc_per_meta_chunk_vblank_c = new_dlg_regs->refcyc_per_meta_chunk_vblank_c;
2904 			old_dlg_regs.refcyc_per_meta_chunk_flip_l = new_dlg_regs->refcyc_per_meta_chunk_flip_l;
2905 			old_dlg_regs.refcyc_per_line_delivery_pre_l = new_dlg_regs->refcyc_per_line_delivery_pre_l;
2906 			old_dlg_regs.refcyc_per_line_delivery_pre_c = new_dlg_regs->refcyc_per_line_delivery_pre_c;
2907 			old_ttu_regs.refcyc_per_req_delivery_pre_l = new_ttu_regs->refcyc_per_req_delivery_pre_l;
2908 			old_ttu_regs.refcyc_per_req_delivery_pre_c = new_ttu_regs->refcyc_per_req_delivery_pre_c;
2909 			old_ttu_regs.refcyc_per_req_delivery_pre_cur0 = new_ttu_regs->refcyc_per_req_delivery_pre_cur0;
2910 			old_ttu_regs.min_ttu_vblank = new_ttu_regs->min_ttu_vblank;
2911 			old_ttu_regs.qos_level_flip = new_ttu_regs->qos_level_flip;
2912 			new_pipe->update_flags.bits.hubp_interdependent = 1;
2913 		}
2914 		/* Detect any other updates to ttu/rq/dlg */
2915 		if (memcmp(&old_dlg_regs, new_dlg_regs, sizeof(old_dlg_regs)) ||
2916 			memcmp(&old_ttu_regs, new_ttu_regs, sizeof(old_ttu_regs)) ||
2917 			memcmp(&old_rq_regs, new_rq_regs, sizeof(old_rq_regs)))
2918 			new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
2919 	}
2920 
2921 	if (old_pipe->unbounded_req != new_pipe->unbounded_req)
2922 		new_pipe->update_flags.bits.unbounded_req = 1;
2923 
2924 	if (memcmp(&old_pipe->stream_res.test_pattern_params,
2925 		&new_pipe->stream_res.test_pattern_params, sizeof(struct test_pattern_params))) {
2926 		new_pipe->update_flags.bits.test_pattern_changed = 1;
2927 	}
2928 }
2929 
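/* Immediately power gate a front end: gate the DPP and HUBP, reset both
 * blocks, then drop the DPP root clock. DC_IP_REQUEST_CNTL is raised around
 * the power gate writes and restored if it was originally clear.
 */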
2930 void dcn401_plane_atomic_power_down(struct dc *dc,
2931 		struct dpp *dpp,
2932 		struct hubp *hubp)
2933 {
2934 	struct dce_hwseq *hws = dc->hwseq;
2935 	uint32_t org_ip_request_cntl = 0;
2936 
2937 	DC_LOGGER_INIT(dc->ctx->logger);
2938 
2939 	if (REG(DC_IP_REQUEST_CNTL)) {
2940 		REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
2941 		if (org_ip_request_cntl == 0)
2942 			REG_SET(DC_IP_REQUEST_CNTL, 0,
2943 				IP_REQUEST_EN, 1);
2944 	}
2945 
2946 	if (hws->funcs.dpp_pg_control)
2947 		hws->funcs.dpp_pg_control(hws, dpp->inst, false);
2948 
2949 	if (hws->funcs.hubp_pg_control)
2950 		hws->funcs.hubp_pg_control(hws, hubp->inst, false);
2951 
2952 	hubp->funcs->hubp_reset(hubp);
2953 	dpp->funcs->dpp_reset(dpp);
2954 
2955 	if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL))
2956 		REG_SET(DC_IP_REQUEST_CNTL, 0,
2957 			IP_REQUEST_EN, 0);
2958 
2959 	DC_LOG_DEBUG(
2960 			"Power gated front end %d\n", hubp->inst);
2961 
2962 	if (hws->funcs.dpp_root_clock_control)
2963 		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
2964 }
2965 
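/* Snapshot the pipe's current cursor state (HUBP and DPP cursor registers)
 * into the next DMUB cursor offload payload slot for this stream, and mark
 * the pipe in the payload's pipe_mask so firmware knows to program it.
 */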
2966 void dcn401_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe)
2967 {
2968 	volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
2969 	const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
2970 	const struct hubp *hubp = pipe->plane_res.hubp;
2971 	const struct dpp *dpp = pipe->plane_res.dpp;
2972 	volatile struct dmub_cursor_offload_pipe_data_dcn401_v1 *p;
2973 	uint32_t stream_idx, write_idx, payload_idx;
2974 
2975 	if (!top_pipe || !hubp || !dpp)
2976 		return;
2977 
2978 	stream_idx = top_pipe->pipe_idx;
2979 	write_idx = cs->offload_streams[stream_idx].write_idx + 1; /*  new payload (+1) */
2980 	payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
2981 
2982 	p = &cs->offload_streams[stream_idx].payloads[payload_idx].pipe_data[pipe->pipe_idx].dcn401;
2983 
2984 	p->CURSOR0_0_CURSOR_SURFACE_ADDRESS = hubp->att.SURFACE_ADDR;
2985 	p->CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH = hubp->att.SURFACE_ADDR_HIGH;
2986 	p->CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH = hubp->att.size.bits.width;
2987 	p->CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT = hubp->att.size.bits.height;
2988 	p->CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION = hubp->pos.position.bits.x_pos;
2989 	p->CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION = hubp->pos.position.bits.y_pos;
2990 	p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X = hubp->pos.hot_spot.bits.x_hot;
2991 	p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y = hubp->pos.hot_spot.bits.y_hot;
2992 	p->CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET = hubp->pos.dst_offset.bits.dst_x_offset;
2993 	p->CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE = hubp->pos.cur_ctl.bits.cur_enable;
2994 	p->CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE = hubp->att.cur_ctl.bits.mode;
2995 	p->CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY = hubp->pos.cur_ctl.bits.cur_2x_magnify;
2996 	p->CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH = hubp->att.cur_ctl.bits.pitch;
2997 	p->CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK = hubp->att.cur_ctl.bits.line_per_chunk;
2998 
2999 	p->CM_CUR0_CURSOR0_CONTROL__CUR0_ENABLE = dpp->att.cur0_ctl.bits.cur0_enable;
3000 	p->CM_CUR0_CURSOR0_CONTROL__CUR0_MODE = dpp->att.cur0_ctl.bits.mode;
3001 	p->CM_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE = dpp->att.cur0_ctl.bits.expansion_mode;
3002 	p->CM_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN = dpp->att.cur0_ctl.bits.cur0_rom_en;
3003 	p->CM_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 = 0x000000;
3004 	p->CM_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 = 0xFFFFFF;
3005 
3006 	p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_BIAS_G_Y =
3007 		dpp->att.fp_scale_bias_g_y.bits.fp_bias_g_y;
3008 	p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_SCALE_G_Y =
3009 		dpp->att.fp_scale_bias_g_y.bits.fp_scale_g_y;
3010 	p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_BIAS_RB_CRCB =
3011 		dpp->att.fp_scale_bias_rb_crcb.bits.fp_bias_rb_crcb;
3012 	p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_SCALE_RB_CRCB =
3013 		dpp->att.fp_scale_bias_rb_crcb.bits.fp_scale_rb_crcb;
3014 
3015 	p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET = hubp->att.settings.bits.dst_y_offset;
3016 	p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST = hubp->att.settings.bits.chunk_hdl_adjust;
3017 	p->HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR = hubp->use_mall_for_cursor;
3018 
3019 	cs->offload_streams[stream_idx].payloads[payload_idx].pipe_mask |= (1u << pipe->pipe_idx);
3020 }
3021 
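/* Block sequence variant of dcn401_plane_atomic_power_down(): queues the
 * same gate/reset/restore steps into seq_state instead of executing them
 * immediately.
 */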
3022 void dcn401_plane_atomic_power_down_sequence(struct dc *dc,
3023 		struct dpp *dpp,
3024 		struct hubp *hubp,
3025 		struct block_sequence_state *seq_state)
3026 {
3027 	struct dce_hwseq *hws = dc->hwseq;
3028 	uint32_t org_ip_request_cntl = 0;
3029 
3030 	DC_LOGGER_INIT(dc->ctx->logger);
3031 
3032 	/* Check and set DC_IP_REQUEST_CNTL if needed */
3033 	if (REG(DC_IP_REQUEST_CNTL)) {
3034 		REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
3035 		if (org_ip_request_cntl == 0)
3036 			hwss_add_dc_ip_request_cntl(seq_state, dc, true);
3037 	}
3038 
3039 	/* DPP power gating control */
3040 	hwss_add_dpp_pg_control(seq_state, hws, dpp->inst, false);
3041 
3042 	/* HUBP power gating control */
3043 	hwss_add_hubp_pg_control(seq_state, hws, hubp->inst, false);
3044 
3045 	/* HUBP reset */
3046 	hwss_add_hubp_reset(seq_state, hubp);
3047 
3048 	/* DPP reset */
3049 	hwss_add_dpp_reset(seq_state, dpp);
3050 
3051 	/* Restore DC_IP_REQUEST_CNTL if it was originally 0 */
3052 	if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL))
3053 		hwss_add_dc_ip_request_cntl(seq_state, dc, false);
3054 
3055 	DC_LOG_DEBUG("Power gated front end %d\n", hubp->inst);
3056 
3057 	/* DPP root clock control */
3058 	hwss_add_dpp_root_clock_control(seq_state, hws, dpp->inst, false);
3059 }
3060 
3061 /* trigger HW to start disconnecting the plane from the stream on the next vsync using the block sequence */
3062 void dcn401_plane_atomic_disconnect_sequence(struct dc *dc,
3063 		struct dc_state *state,
3064 		struct pipe_ctx *pipe_ctx,
3065 		struct block_sequence_state *seq_state)
3066 {
3067 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
3068 	int dpp_id = pipe_ctx->plane_res.dpp->inst;
3069 	struct mpc *mpc = dc->res_pool->mpc;
3070 	struct mpc_tree *mpc_tree_params;
3071 	struct mpcc *mpcc_to_remove = NULL;
3072 	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
3073 
3074 	mpc_tree_params = &(opp->mpc_tree_params);
3075 	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
3076 
3077 	/* Already reset */
3078 	if (mpcc_to_remove == NULL)
3079 		return;
3080 
3081 	/* Step 1: Remove MPCC from MPC tree */
3082 	hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, mpcc_to_remove);
3083 
3084 	// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
3085 	// so don't wait for MPCC_IDLE in the programming sequence
3086 	if (dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM) {
3087 		/* Step 2: Set MPCC disconnect pending flag */
3088 		hwss_add_opp_set_mpcc_disconnect_pending(seq_state, opp, pipe_ctx->plane_res.mpcc_inst, true);
3089 	}
3090 
3091 	/* Step 3: Set optimized required flag */
3092 	hwss_add_dc_set_optimized_required(seq_state, dc, true);
3093 
3094 	/* Step 4: Disconnect HUBP if function exists */
3095 	if (hubp->funcs->hubp_disconnect)
3096 		hwss_add_hubp_disconnect(seq_state, hubp);
3097 
3098 	/* Step 5: Verify pstate change high if debug sanity checks are enabled */
3099 	if (dc->debug.sanity_checks)
3100 		dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
3101 }
3102 
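/* Queue DPG based blanking (or unblanking) for an OTG master and all of its
 * ODM slices. When blanking, ABM is immediately disabled; when unblanking,
 * the ABM pipe and level are restored after the pattern generators are set.
 */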
3103 void dcn401_blank_pixel_data_sequence(
3104 	struct dc *dc,
3105 	struct pipe_ctx *pipe_ctx,
3106 	bool blank,
3107 	struct block_sequence_state *seq_state)
3108 {
3109 	struct tg_color black_color = {0};
3110 	struct stream_resource *stream_res = &pipe_ctx->stream_res;
3111 	struct dc_stream_state *stream = pipe_ctx->stream;
3112 	enum dc_color_space color_space = stream->output_color_space;
3113 	enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR;
3114 	enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
3115 	struct pipe_ctx *odm_pipe;
3116 	struct rect odm_slice_src;
3117 
3118 	if (stream->link->test_pattern_enabled)
3119 		return;
3120 
3121 	/* get opp dpg blank color */
3122 	color_space_to_black_color(dc, color_space, &black_color);
3123 
3124 	if (blank) {
3125 		/* Set ABM immediate disable */
3126 		hwss_add_abm_set_immediate_disable(seq_state, dc, pipe_ctx);
3127 
3128 		if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
3129 			test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
3130 			test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
3131 		}
3132 	} else {
3133 		test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
3134 	}
3135 
3136 	odm_pipe = pipe_ctx;
3137 
3138 	/* Set display pattern generator for all ODM pipes */
3139 	while (odm_pipe->next_odm_pipe) {
3140 		odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe);
3141 
3142 		hwss_add_opp_set_disp_pattern_generator(seq_state,
3143 			odm_pipe->stream_res.opp,
3144 			test_pattern,
3145 			test_pattern_color_space,
3146 			stream->timing.display_color_depth,
3147 			black_color,
3148 			true,
3149 			odm_slice_src.width,
3150 			odm_slice_src.height,
3151 			odm_slice_src.x);
3152 
3153 		odm_pipe = odm_pipe->next_odm_pipe;
3154 	}
3155 
3156 	/* Set display pattern generator for final ODM pipe */
3157 	odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe);
3158 
3159 	hwss_add_opp_set_disp_pattern_generator(seq_state,
3160 		odm_pipe->stream_res.opp,
3161 		test_pattern,
3162 		test_pattern_color_space,
3163 		stream->timing.display_color_depth,
3164 		black_color,
3165 		true,
3166 		odm_slice_src.width,
3167 		odm_slice_src.height,
3168 		odm_slice_src.x);
3169 
3170 	/* Handle ABM level setting when not blanking */
3171 	if (!blank) {
3172 		if (stream_res->abm) {
3173 			/* Set pipe for ABM */
3174 			hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx);
3175 
3176 			/* Set ABM level */
3177 			hwss_add_abm_set_level(seq_state, stream_res->abm, stream->abm_level);
3178 		}
3179 	}
3180 }
3181 
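/* Walk every writeback attached to the stream and queue the matching
 * enable/update/disable sequence. A writeback whose source plane no longer
 * exists in the new context is disabled and disconnected from its MPCC.
 */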
3182 void dcn401_program_all_writeback_pipes_in_tree_sequence(
3183 		struct dc *dc,
3184 		const struct dc_stream_state *stream,
3185 		struct dc_state *context,
3186 		struct block_sequence_state *seq_state)
3187 {
3188 	struct dwbc *dwb;
3189 	int i_wb, i_pipe;
3190 
3191 	if (!stream || stream->num_wb_info > dc->res_pool->res_cap->num_dwb)
3192 		return;
3193 
3194 	/* For each writeback pipe */
3195 	for (i_wb = 0; i_wb < stream->num_wb_info; i_wb++) {
3196 		/* Get direct pointer to writeback info */
3197 		struct dc_writeback_info *wb_info = (struct dc_writeback_info *)&stream->writeback_info[i_wb];
3198 		int mpcc_inst = -1;
3199 
3200 		if (wb_info->wb_enabled) {
3201 			/* Get the MPCC instance for writeback_source_plane */
3202 			for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) {
3203 				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe];
3204 
3205 				if (!pipe_ctx->plane_state)
3206 					continue;
3207 
3208 				if (pipe_ctx->plane_state == wb_info->writeback_source_plane) {
3209 					mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
3210 					break;
3211 				}
3212 			}
3213 
3214 			if (mpcc_inst == -1) {
3215 				/* Disable writeback pipe and disconnect from MPCC
3216 				 * if source plane has been removed
3217 				 */
3218 				dcn401_disable_writeback_sequence(dc, wb_info, seq_state);
3219 				continue;
3220 			}
3221 
3222 			ASSERT(wb_info->dwb_pipe_inst < dc->res_pool->res_cap->num_dwb);
3223 			dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
3224 
3225 			if (dwb->funcs->is_enabled(dwb)) {
3226 				/* Writeback pipe already enabled, only need to update */
3227 				dcn401_update_writeback_sequence(dc, wb_info, context, seq_state);
3228 			} else {
3229 				/* Enable writeback pipe and connect to MPCC */
3230 				dcn401_enable_writeback_sequence(dc, wb_info, context, mpcc_inst, seq_state);
3231 			}
3232 		} else {
3233 			/* Disable writeback pipe and disconnect from MPCC */
3234 			dcn401_disable_writeback_sequence(dc, wb_info, seq_state);
3235 		}
3236 	}
3237 }
3238 
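/* Enable order matters here: DWBC and MCIF_WB are configured first, MCIF_WB
 * is enabled, the DWB mux is connected to the source MPCC, and only then is
 * the DWBC itself enabled.
 */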
3239 void dcn401_enable_writeback_sequence(
3240 		struct dc *dc,
3241 		struct dc_writeback_info *wb_info,
3242 		struct dc_state *context,
3243 		int mpcc_inst,
3244 		struct block_sequence_state *seq_state)
3245 {
3246 	struct dwbc *dwb;
3247 	struct mcif_wb *mcif_wb;
3248 
3249 	if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
3250 		return;
3251 
3252 	dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
3253 	mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
3254 
3255 	/* Update DWBC with new parameters */
3256 	hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params);
3257 
3258 	/* Configure MCIF_WB buffer settings */
3259 	hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
3260 
3261 	/* Configure MCIF_WB arbitration */
3262 	hwss_add_mcif_wb_config_arb(seq_state, mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
3263 
3264 	/* Enable MCIF_WB */
3265 	hwss_add_mcif_wb_enable(seq_state, mcif_wb);
3266 
3267 	/* Set DWB MUX to connect writeback to MPCC */
3268 	hwss_add_mpc_set_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst, mpcc_inst);
3269 
3270 	/* Enable DWBC */
3271 	hwss_add_dwbc_enable(seq_state, dwb, &wb_info->dwb_params);
3272 }
3273 
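/* Disable in roughly the reverse order of enable: DWBC first, then the DWB
 * mux, then MCIF_WB.
 */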
3274 void dcn401_disable_writeback_sequence(
3275 		struct dc *dc,
3276 		struct dc_writeback_info *wb_info,
3277 		struct block_sequence_state *seq_state)
3278 {
3279 	struct dwbc *dwb;
3280 	struct mcif_wb *mcif_wb;
3281 
3282 	if (wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
3283 		return;
3284 
3285 	dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
3286 	mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
3287 
3288 	/* Disable DWBC */
3289 	hwss_add_dwbc_disable(seq_state, dwb);
3290 
3291 	/* Disable DWB MUX */
3292 	hwss_add_mpc_disable_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst);
3293 
3294 	/* Disable MCIF_WB */
3295 	hwss_add_mcif_wb_disable(seq_state, mcif_wb);
3296 }
3297 
3298 void dcn401_update_writeback_sequence(
3299 		struct dc *dc,
3300 		struct dc_writeback_info *wb_info,
3301 		struct dc_state *context,
3302 		struct block_sequence_state *seq_state)
3303 {
3304 	struct dwbc *dwb;
3305 	struct mcif_wb *mcif_wb;
3306 
3307 	if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
3308 		return;
3309 
3310 	dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
3311 	mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
3312 
3313 	/* Update writeback pipe */
3314 	hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params);
3315 
3316 	/* Update MCIF_WB buffer settings if needed */
3317 	hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
3318 }
3319 
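/* Return the first free GSL group (1-3), or 0 if all are in use. */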
3320 static int find_free_gsl_group(const struct dc *dc)
3321 {
3322 	if (dc->res_pool->gsl_groups.gsl_0 == 0)
3323 		return 1;
3324 	if (dc->res_pool->gsl_groups.gsl_1 == 0)
3325 		return 2;
3326 	if (dc->res_pool->gsl_groups.gsl_2 == 0)
3327 		return 3;
3328 
3329 	return 0;
3330 }
3331 
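/* Acquire or release a GSL group used as a lock for flip immediate with
 * pipe splitting. On enable, a free group is claimed and this OTG becomes
 * the GSL master; on disable, the pipe's group is released and the enable
 * bit cleared before reprogramming the timing generator.
 */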
3332 void dcn401_setup_gsl_group_as_lock_sequence(
3333 		const struct dc *dc,
3334 		struct pipe_ctx *pipe_ctx,
3335 		bool enable,
3336 		struct block_sequence_state *seq_state)
3337 {
3338 	struct gsl_params gsl;
3339 	int group_idx;
3340 
3341 	memset(&gsl, 0, sizeof(struct gsl_params));
3342 
3343 	if (enable) {
3344 		/* return if a group is already assigned; since GSL set up for a
3345 		 * vsync flip is always unassigned afterwards, it can't be "left over"
3346 		 */
3347 		if (pipe_ctx->stream_res.gsl_group > 0)
3348 			return;
3349 
3350 		group_idx = find_free_gsl_group(dc);
3351 		ASSERT(group_idx != 0);
3352 		pipe_ctx->stream_res.gsl_group = group_idx;
3353 
3354 		/* set gsl group reg field and mark resource used */
3355 		switch (group_idx) {
3356 		case 1:
3357 			gsl.gsl0_en = 1;
3358 			dc->res_pool->gsl_groups.gsl_0 = 1;
3359 			break;
3360 		case 2:
3361 			gsl.gsl1_en = 1;
3362 			dc->res_pool->gsl_groups.gsl_1 = 1;
3363 			break;
3364 		case 3:
3365 			gsl.gsl2_en = 1;
3366 			dc->res_pool->gsl_groups.gsl_2 = 1;
3367 			break;
3368 		default:
3369 			BREAK_TO_DEBUGGER();
3370 			return; // invalid case
3371 		}
3372 		gsl.gsl_master_en = 1;
3373 	} else {
3374 		group_idx = pipe_ctx->stream_res.gsl_group;
3375 		if (group_idx == 0)
3376 			return; // if not in use, just return
3377 
3378 		pipe_ctx->stream_res.gsl_group = 0;
3379 
3380 		/* unset gsl group reg field and mark resource free */
3381 		switch (group_idx) {
3382 		case 1:
3383 			gsl.gsl0_en = 0;
3384 			dc->res_pool->gsl_groups.gsl_0 = 0;
3385 			break;
3386 		case 2:
3387 			gsl.gsl1_en = 0;
3388 			dc->res_pool->gsl_groups.gsl_1 = 0;
3389 			break;
3390 		case 3:
3391 			gsl.gsl2_en = 0;
3392 			dc->res_pool->gsl_groups.gsl_2 = 0;
3393 			break;
3394 		default:
3395 			BREAK_TO_DEBUGGER();
3396 			return;
3397 		}
3398 		gsl.gsl_master_en = 0;
3399 	}
3400 
3401 	hwss_add_tg_set_gsl(seq_state, pipe_ctx->stream_res.tg, gsl);
3402 	hwss_add_tg_set_gsl_source_select(seq_state, pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
3403 }
3404 
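/* Queue the full plane disable sequence: wait for MPCC disconnect, release
 * any GSL lock group, drop the MALL selection, disable HUBP/DPP clocks,
 * power down the front end, then clear the pipe_ctx software state.
 */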
3405 void dcn401_disable_plane_sequence(
3406 		struct dc *dc,
3407 		struct dc_state *state,
3408 		struct pipe_ctx *pipe_ctx,
3409 		struct block_sequence_state *seq_state)
3410 {
3411 	bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
3412 	struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
3413 
3414 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
3415 		return;
3416 
3417 	/* Wait for MPCC disconnect */
3418 	if (dc->hwss.wait_for_mpcc_disconnect_sequence)
3419 		dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, pipe_ctx, seq_state);
3420 
3421 	/* In the flip immediate with pipe splitting case, GSL is used for synchronization
3422 	 * so we must disable it when the plane is disabled.
3423 	 */
3424 	if (pipe_ctx->stream_res.gsl_group != 0)
3425 		dcn401_setup_gsl_group_as_lock_sequence(dc, pipe_ctx, false, seq_state);
3426 
3427 	/* Update HUBP mall sel */
3428 	if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs->hubp_update_mall_sel)
3429 		hwss_add_hubp_update_mall_sel(seq_state, pipe_ctx->plane_res.hubp, 0, false);
3430 
3431 	/* Set flip control GSL */
3432 	hwss_add_hubp_set_flip_control_gsl(seq_state, pipe_ctx->plane_res.hubp, false);
3433 
3434 	/* HUBP clock control */
3435 	hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, false);
3436 
3437 	/* DPP clock control */
3438 	hwss_add_dpp_dppclk_control(seq_state, pipe_ctx->plane_res.dpp, false, false);
3439 
3440 	/* Plane atomic power down */
3441 	if (dc->hwseq->funcs.plane_atomic_power_down_sequence)
3442 		dc->hwseq->funcs.plane_atomic_power_down_sequence(dc, pipe_ctx->plane_res.dpp,
3443 			pipe_ctx->plane_res.hubp, seq_state);
3444 
3445 	pipe_ctx->stream = NULL;
3446 	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
3447 	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
3448 	pipe_ctx->top_pipe = NULL;
3449 	pipe_ctx->bottom_pipe = NULL;
3450 	pipe_ctx->prev_odm_pipe = NULL;
3451 	pipe_ctx->next_odm_pipe = NULL;
3452 	pipe_ctx->plane_state = NULL;
3453 
3454 	/* Turn back off the phantom OTG after the phantom plane is fully disabled */
3455 	if (is_phantom && tg && tg->funcs->disable_phantom_crtc)
3456 		hwss_add_disable_phantom_crtc(seq_state, tg);
3457 }
3458 
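/* Reset the OPP once its MPC tree is quiesced: after all DPPs have
 * disconnected, disconnect and disable any DSC still attached to this OPP
 * head and drop its reference DSCCLK.
 */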
3459 void dcn401_post_unlock_reset_opp_sequence(
3460 		struct dc *dc,
3461 		struct pipe_ctx *opp_head,
3462 		struct block_sequence_state *seq_state)
3463 {
3464 	struct display_stream_compressor *dsc = opp_head->stream_res.dsc;
3465 	struct dccg *dccg = dc->res_pool->dccg;
3466 
3467 	/* Wait until all DPP pipes in the current mpc blending tree complete their
3468 	 * double buffered disconnection before resetting the OPP
3469 	 */
3470 	if (dc->hwss.wait_for_mpcc_disconnect_sequence)
3471 		dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, opp_head, seq_state);
3472 
3473 	if (dsc) {
3474 		bool *is_ungated = NULL;
3475 		/* Check DSC power gate status */
3476 		if (dc->hwseq && dc->hwseq->funcs.dsc_pg_status)
3477 			hwss_add_dsc_pg_status(seq_state, dc->hwseq, dsc->inst, false);
3478 
3479 		/* Seamless update specific: postpone the non double
3480 		 * buffered DSCCLK disable logic to the post unlock
3481 		 * sequence, after DSC is disconnected from the OPP
3482 		 * but not yet power gated.
3483 		 */
3484 
3485 		/* DSC wait disconnect pending clear */
3486 		hwss_add_dsc_wait_disconnect_pending_clear(seq_state, dsc, is_ungated);
3487 
3488 		/* DSC disable */
3489 		hwss_add_dsc_disable(seq_state, dsc, is_ungated);
3490 
3491 		/* Set reference DSCCLK */
3492 		if (dccg && dccg->funcs->set_ref_dscclk)
3493 			hwss_add_dccg_set_ref_dscclk(seq_state, dccg, dsc->inst, 0);
3494 	}
3495 }
3496 
3497 void dcn401_dc_ip_request_cntl(struct dc *dc, bool enable)
3498 {
3499 	struct dce_hwseq *hws = dc->hwseq;
3500 
3501 	if (REG(DC_IP_REQUEST_CNTL))
3502 		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, enable ? 1 : 0);
3503 }
3504 
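/* Queue front end power up for a newly enabled plane. The numbered steps
 * below are intended to mirror the immediate enable_plane path, just routed
 * through the block sequence.
 */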
3505 void dcn401_enable_plane_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
3506 				 struct dc_state *context,
3507 				 struct block_sequence_state *seq_state)
3508 {
3509 	struct dce_hwseq *hws = dc->hwseq;
3510 	uint32_t org_ip_request_cntl = 0;
3511 
3512 	if (!pipe_ctx->plane_res.dpp || !pipe_ctx->plane_res.hubp || !pipe_ctx->stream_res.opp)
3513 		return;
3514 
3515 	if (REG(DC_IP_REQUEST_CNTL))
3516 		REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
3517 
3518 	/* Step 1: DPP root clock control - enable clock */
3519 	if (hws->funcs.dpp_root_clock_control)
3520 		hwss_add_dpp_root_clock_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true);
3521 
3522 	/* Step 2: Enable DC IP request (if needed) */
3523 	if (hws->funcs.dc_ip_request_cntl)
3524 		hwss_add_dc_ip_request_cntl(seq_state, dc, true);
3525 
3526 	/* Step 3: DPP power gating control - power on */
3527 	if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.dpp_pg_control)
3528 		hwss_add_dpp_pg_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true);
3529 
3530 	/* Step 4: HUBP power gating control - power on */
3531 	if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.hubp_pg_control)
3532 		hwss_add_hubp_pg_control(seq_state, hws, pipe_ctx->plane_res.hubp->inst, true);
3533 
3534 	/* Step 5: Disable DC IP request (restore state) */
3535 	if (org_ip_request_cntl == 0 && hws->funcs.dc_ip_request_cntl)
3536 		hwss_add_dc_ip_request_cntl(seq_state, dc, false);
3537 
3538 	/* Step 6: HUBP clock control - enable DCFCLK */
3539 	if (pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl)
3540 		hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, true);
3541 
3542 	/* Step 7: HUBP initialization */
3543 	if (pipe_ctx->plane_res.hubp->funcs->hubp_init)
3544 		hwss_add_hubp_init(seq_state, pipe_ctx->plane_res.hubp);
3545 
3546 	/* Step 8: OPP pipe clock control - enable */
3547 	if (pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control)
3548 		hwss_add_opp_pipe_clock_control(seq_state, pipe_ctx->stream_res.opp, true);
3549 
3550 	/* Step 9: VM system aperture settings */
3551 	if (dc->vm_pa_config.valid && pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings) {
3552 		hwss_add_hubp_set_vm_system_aperture_settings(seq_state, pipe_ctx->plane_res.hubp, 0,
3553 			dc->vm_pa_config.system_aperture.start_addr, dc->vm_pa_config.system_aperture.end_addr);
3554 	}
3555 
3556 	/* Step 10: Flip interrupt setup */
3557 	if (!pipe_ctx->top_pipe
3558 			&& pipe_ctx->plane_state
3559 			&& pipe_ctx->plane_state->flip_int_enabled
3560 			&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int) {
3561 		hwss_add_hubp_set_flip_int(seq_state, pipe_ctx->plane_res.hubp);
3562 	}
3563 }
3564 
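/* Queue HUBP/DPP programming for one pipe based on the update flags set by
 * dcn401_detect_pipe_changes(): clocks and DTOs first, then RQ/DLG/TTU,
 * scaler and viewport, MPCC blending, cursor, gamut remap/output CSC,
 * surface config and finally the plane address.
 */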
3565 void dcn401_update_dchubp_dpp_sequence(struct dc *dc,
3566 				       struct pipe_ctx *pipe_ctx,
3567 				       struct dc_state *context,
3568 				       struct block_sequence_state *seq_state)
3569 {
3570 	struct dce_hwseq *hws = dc->hwseq;
3571 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
3572 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
3573 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3574 	struct dccg *dccg = dc->res_pool->dccg;
3575 	bool viewport_changed = false;
3576 	enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe_ctx);
3577 
3578 	if (!hubp || !dpp || !plane_state)
3579 		return;
3580 
3581 	/* Step 1: DPP DPPCLK control */
3582 	if (pipe_ctx->update_flags.bits.dppclk)
3583 		hwss_add_dpp_dppclk_control(seq_state, dpp, false, true);
3584 
3585 	/* Step 2: DCCG update DPP DTO */
3586 	if (pipe_ctx->update_flags.bits.enable)
3587 		hwss_add_dccg_update_dpp_dto(seq_state, dccg, dpp->inst, pipe_ctx->plane_res.bw.dppclk_khz);
3588 
3589 	/* Step 3: HUBP VTG selection */
3590 	if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
3591 		hwss_add_hubp_vtg_sel(seq_state, hubp, pipe_ctx->stream_res.tg->inst);
3592 
3593 		/* Step 4: HUBP setup (choose setup2 or setup) */
3594 		if (hubp->funcs->hubp_setup2) {
3595 			hwss_add_hubp_setup2(seq_state, hubp, &pipe_ctx->hubp_regs,
3596 				&pipe_ctx->global_sync, &pipe_ctx->stream->timing);
3597 		} else if (hubp->funcs->hubp_setup) {
3598 			hwss_add_hubp_setup(seq_state, hubp, &pipe_ctx->dlg_regs,
3599 				&pipe_ctx->ttu_regs, &pipe_ctx->rq_regs, &pipe_ctx->pipe_dlg_param);
3600 		}
3601 	}
3602 
3603 	/* Step 5: Set unbounded requesting */
3604 	if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting)
3605 		hwss_add_hubp_set_unbounded_requesting(seq_state, hubp, pipe_ctx->unbounded_req);
3606 
3607 	/* Step 6: HUBP interdependent setup */
3608 	if (pipe_ctx->update_flags.bits.hubp_interdependent) {
3609 		if (hubp->funcs->hubp_setup_interdependent2)
3610 			hwss_add_hubp_setup_interdependent2(seq_state, hubp, &pipe_ctx->hubp_regs);
3611 		else if (hubp->funcs->hubp_setup_interdependent)
3612 			hwss_add_hubp_setup_interdependent(seq_state, hubp, &pipe_ctx->dlg_regs, &pipe_ctx->ttu_regs);
3613 	}
3614 
3615 	/* Step 7: DPP setup - input CSC and format setup */
3616 	if (pipe_ctx->update_flags.bits.enable ||
3617 			pipe_ctx->update_flags.bits.plane_changed ||
3618 			plane_state->update_flags.bits.bpp_change ||
3619 			plane_state->update_flags.bits.input_csc_change ||
3620 			plane_state->update_flags.bits.color_space_change ||
3621 			plane_state->update_flags.bits.coeff_reduction_change) {
3622 		hwss_add_dpp_setup_dpp(seq_state, pipe_ctx);
3623 
3624 		/* Step 8: DPP cursor matrix setup */
3625 		if (dpp->funcs->set_cursor_matrix) {
3626 			hwss_add_dpp_set_cursor_matrix(seq_state, dpp, plane_state->color_space,
3627 				&plane_state->cursor_csc_color_matrix);
3628 		}
3629 
3630 		/* Step 9: DPP program bias and scale */
3631 		if (dpp->funcs->dpp_program_bias_and_scale)
3632 			hwss_add_dpp_program_bias_and_scale(seq_state, pipe_ctx);
3633 	}
3634 
3635 	/* Step 10: MPCC updates */
3636 	if (pipe_ctx->update_flags.bits.mpcc ||
3637 	     pipe_ctx->update_flags.bits.plane_changed ||
3638 	     plane_state->update_flags.bits.global_alpha_change ||
3639 	     plane_state->update_flags.bits.per_pixel_alpha_change) {
3640 
3641 		/* Check if update_mpcc_sequence is implemented and prefer it over single MPC_UPDATE_MPCC step */
3642 		if (hws->funcs.update_mpcc_sequence)
3643 			hws->funcs.update_mpcc_sequence(dc, pipe_ctx, seq_state);
3644 	}
3645 
3646 	/* Step 11: DPP scaler setup */
3647 	if (pipe_ctx->update_flags.bits.scaler ||
3648 			plane_state->update_flags.bits.scaling_change ||
3649 			plane_state->update_flags.bits.position_change ||
3650 			plane_state->update_flags.bits.per_pixel_alpha_change ||
3651 			pipe_ctx->stream->update_flags.bits.scaling) {
3652 		pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
3653 		ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_36BPP);
3654 		hwss_add_dpp_set_scaler(seq_state, pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
3655 	}
3656 
3657 	/* Step 12: HUBP viewport programming */
3658 	if (pipe_ctx->update_flags.bits.viewport ||
3659 	     (context == dc->current_state && plane_state->update_flags.bits.position_change) ||
3660 	     (context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
3661 	     (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
3662 		hwss_add_hubp_mem_program_viewport(seq_state, hubp,
3663 			&pipe_ctx->plane_res.scl_data.viewport, &pipe_ctx->plane_res.scl_data.viewport_c);
3664 		viewport_changed = true;
3665 	}
3666 
3667 	/* Step 13: HUBP program mcache if available */
3668 	if (hubp->funcs->hubp_program_mcache_id_and_split_coordinate)
3669 		hwss_add_hubp_program_mcache_id(seq_state, hubp, &pipe_ctx->mcache_regs);
3670 
3671 	/* Step 14: Cursor attribute setup */
3672 	if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
3673 	     pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
3674 	    pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
3675 
3676 		hwss_add_abort_cursor_offload_update(seq_state, dc, pipe_ctx);
3677 
3678 		hwss_add_set_cursor_attribute(seq_state, dc, pipe_ctx);
3679 
3680 		/* Step 15: Cursor position setup */
3681 		hwss_add_set_cursor_position(seq_state, dc, pipe_ctx);
3682 
3683 		/* Step 16: Cursor SDR white level */
3684 		if (dc->hwss.set_cursor_sdr_white_level)
3685 			hwss_add_set_cursor_sdr_white_level(seq_state, dc, pipe_ctx);
3686 	}
3687 
3688 	/* Step 17: Gamut remap and output CSC */
3689 	if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
3690 			pipe_ctx->update_flags.bits.plane_changed ||
3691 			pipe_ctx->stream->update_flags.bits.gamut_remap ||
3692 			plane_state->update_flags.bits.gamut_remap_change ||
3693 			pipe_ctx->stream->update_flags.bits.out_csc) {
3694 
3695 		/* Gamut remap */
3696 		hwss_add_dpp_program_gamut_remap(seq_state, pipe_ctx);
3697 
3698 		/* Output CSC */
3699 		hwss_add_program_output_csc(seq_state, dc, pipe_ctx, pipe_ctx->stream->output_color_space,
3700 			pipe_ctx->stream->csc_color_matrix.matrix, hubp->opp_id);
3701 	}
3702 
3703 	/* Step 18: HUBP surface configuration */
3704 	if (pipe_ctx->update_flags.bits.enable ||
3705 			pipe_ctx->update_flags.bits.plane_changed ||
3706 			pipe_ctx->update_flags.bits.opp_changed ||
3707 			plane_state->update_flags.bits.pixel_format_change ||
3708 			plane_state->update_flags.bits.horizontal_mirror_change ||
3709 			plane_state->update_flags.bits.rotation_change ||
3710 			plane_state->update_flags.bits.swizzle_change ||
3711 			plane_state->update_flags.bits.dcc_change ||
3712 			plane_state->update_flags.bits.bpp_change ||
3713 			plane_state->update_flags.bits.scaling_change ||
3714 			plane_state->update_flags.bits.plane_size_change) {
3715 		struct plane_size size = plane_state->plane_size;
3716 
3717 		size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
3718 		hwss_add_hubp_program_surface_config(seq_state, hubp,
3719 				plane_state->format, &plane_state->tiling_info, size,
3720 				plane_state->rotation, &plane_state->dcc,
3721 				plane_state->horizontal_mirror, 0);
3722 		hubp->power_gated = false;
3723 	}
3724 
3725 	/* Step 19: Update plane address (with SubVP support) */
3726 	if (pipe_ctx->update_flags.bits.enable ||
3727 	     pipe_ctx->update_flags.bits.plane_changed ||
3728 	     plane_state->update_flags.bits.addr_update) {
3729 
3730 		/* SubVP save surface address if needed */
3731 		if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_mall_type == SUBVP_MAIN) {
3732 			hwss_add_dmub_subvp_save_surf_addr(seq_state, dc->ctx->dmub_srv,
3733 				&pipe_ctx->plane_state->address, pipe_ctx->subvp_index);
3734 		}
3735 
3736 		/* Update plane address */
3737 		hwss_add_hubp_update_plane_addr(seq_state, dc, pipe_ctx);
3738 	}
3739 
3740 	/* Step 20: HUBP set blank - enable plane */
3741 	if (pipe_ctx->update_flags.bits.enable)
3742 		hwss_add_hubp_set_blank(seq_state, hubp, false);
3743 
3744 	/* Step 21: Phantom HUBP post enable */
3745 	if (pipe_mall_type == SUBVP_PHANTOM && hubp->funcs->phantom_hubp_post_enable)
3746 		hwss_add_phantom_hubp_post_enable(seq_state, hubp);
3747 }
3748 
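/* Build the MPCC blend configuration for this plane and queue either a
 * blend-only update (when no full update is pending) or a remove/insert of
 * the MPCC in the OPP's tree. The MPCC instance always matches the HUBP
 * instance.
 */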
3749 void dcn401_update_mpcc_sequence(struct dc *dc,
3750 				struct pipe_ctx *pipe_ctx,
3751 				struct block_sequence_state *seq_state)
3752 {
3753 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
3754 	struct mpcc_blnd_cfg blnd_cfg = {0};
3755 	bool per_pixel_alpha;
3756 	int mpcc_id;
3757 	struct mpcc *new_mpcc;
3758 	struct mpc *mpc = dc->res_pool->mpc;
3759 	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
3760 
3761 	if (!hubp || !pipe_ctx->plane_state)
3762 		return;
3763 
3764 	per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
3765 
3766 	/* Initialize blend configuration */
3767 	blnd_cfg.overlap_only = false;
3768 	blnd_cfg.global_gain = 0xff;
3769 
3770 	if (per_pixel_alpha) {
3771 		blnd_cfg.pre_multiplied_alpha = pipe_ctx->plane_state->pre_multiplied_alpha;
3772 		if (pipe_ctx->plane_state->global_alpha) {
3773 			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
3774 			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
3775 		} else {
3776 			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
3777 		}
3778 	} else {
3779 		blnd_cfg.pre_multiplied_alpha = false;
3780 		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
3781 	}
3782 
3783 	if (pipe_ctx->plane_state->global_alpha)
3784 		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
3785 	else
3786 		blnd_cfg.global_alpha = 0xff;
3787 
3788 	blnd_cfg.background_color_bpc = 4;
3789 	blnd_cfg.bottom_gain_mode = 0;
3790 	blnd_cfg.top_gain = 0x1f000;
3791 	blnd_cfg.bottom_inside_gain = 0x1f000;
3792 	blnd_cfg.bottom_outside_gain = 0x1f000;
3793 
3794 	if (pipe_ctx->plane_state->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA)
3795 		blnd_cfg.pre_multiplied_alpha = false;
3796 
3797 	/* MPCC instance is equal to HUBP instance */
3798 	mpcc_id = hubp->inst;
3799 
3800 	/* Step 1: Update blending if no full update needed */
3801 	if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
3802 	    !pipe_ctx->update_flags.bits.mpcc) {
3803 
3804 		/* Update blending configuration */
3805 		hwss_add_mpc_update_blending(seq_state, mpc, blnd_cfg, mpcc_id);
3806 
3807 		/* Update visual confirm color */
3808 		hwss_add_mpc_update_visual_confirm(seq_state, dc, pipe_ctx, mpcc_id);
3809 		return;
3810 	}
3811 
3812 	/* Step 2: Get existing MPCC for DPP */
3813 	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
3814 
3815 	/* Step 3: Remove MPCC if being used */
3816 	if (new_mpcc != NULL) {
3817 		hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, new_mpcc);
3818 	} else {
3819 		/* Step 4: Assert MPCC idle (debug only) */
3820 		if (dc->debug.sanity_checks)
3821 			hwss_add_mpc_assert_idle_mpcc(seq_state, mpc, mpcc_id);
3822 	}
3823 
3824 	/* Step 5: Insert new plane into MPC tree */
3825 	hwss_add_mpc_insert_plane(seq_state, mpc, mpc_tree_params, blnd_cfg, NULL, NULL, hubp->inst, mpcc_id);
3826 
3827 	/* Step 6: Update visual confirm color */
3828 	hwss_add_mpc_update_visual_confirm(seq_state, dc, pipe_ctx, mpcc_id);
3829 
3830 	/* Step 7: Set HUBP OPP and MPCC IDs */
3831 	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
3832 	hubp->mpcc_id = mpcc_id;
3833 }
3834 
3835 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3836 {
3837 	int i;
3838 
3839 	for (i = 0; i < res_pool->pipe_count; i++) {
3840 		if (res_pool->hubps[i]->inst == mpcc_inst)
3841 			return res_pool->hubps[i];
3842 	}
3843 	ASSERT(false);
3844 	return NULL;
3845 }
3846 
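/* For each MPCC with a pending disconnect on this OPP, queue an idle assert
 * (only while the OTG is running) and re-blank the matching HUBP, clearing
 * the pending flag as we go.
 */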
3847 void dcn401_wait_for_mpcc_disconnect_sequence(
3848 		struct dc *dc,
3849 		struct resource_pool *res_pool,
3850 		struct pipe_ctx *pipe_ctx,
3851 		struct block_sequence_state *seq_state)
3852 {
3853 	int mpcc_inst;
3854 
3855 	if (dc->debug.sanity_checks)
3856 		dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
3857 
3858 	if (!pipe_ctx->stream_res.opp)
3859 		return;
3860 
3861 	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3862 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3863 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3864 
3865 			if (pipe_ctx->stream_res.tg &&
3866 				pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) {
3867 				hwss_add_mpc_assert_idle_mpcc(seq_state, res_pool->mpc, mpcc_inst);
3868 			}
3869 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3870 			if (hubp)
3871 				hwss_add_hubp_set_blank(seq_state, hubp, true);
3872 		}
3873 	}
3874 
3875 	if (dc->debug.sanity_checks)
3876 		dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
3877 }
3878 
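/* Program vertical interrupt 2 to fire at the VUPDATE offset computed from
 * vsync; negative offsets are clamped to line 0.
 */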
3879 void dcn401_setup_vupdate_interrupt_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
3880 		struct block_sequence_state *seq_state)
3881 {
3882 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3883 	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3884 
3885 	if (start_line < 0)
3886 		start_line = 0;
3887 
3888 	if (tg->funcs->setup_vertical_interrupt2)
3889 		hwss_add_tg_setup_vertical_interrupt2(seq_state, tg, start_line);
3890 }
3891 
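/* Convert the fixed point HDR multiplier to the DPP custom float format:
 * 1 sign bit, 6 exponent bits (bias 31) and 12 mantissa bits. 1.0 encodes
 * as a biased exponent of 31 and zero mantissa, i.e. 0x1f << 12 = 0x1f000,
 * which is the default kept when the multiplier is zero.
 */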
3892 void dcn401_set_hdr_multiplier_sequence(struct pipe_ctx *pipe_ctx,
3893 		struct block_sequence_state *seq_state)
3894 {
3895 	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
3896 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
3897 	struct custom_float_format fmt;
3898 
3899 	fmt.exponenta_bits = 6;
3900 	fmt.mantissa_bits = 12;
3901 	fmt.sign = true;
3902 
3903 	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
3904 		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
3905 
3906 	hwss_add_dpp_set_hdr_multiplier(seq_state, pipe_ctx->plane_res.dpp, hw_mult);
3907 }
3908 
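/* Queue MALL_SEL programming for each pipe: phantom pipes get MALL_SEL=1,
 * while other pipes get MALL_SEL=2 only when the CAB allocation fits in the
 * cache and PSR, stereo and TMZ surfaces do not rule MALL out. Cursor bytes
 * are derived from pitch * height scaled by the cursor format, and cursors
 * over 16KB are cached as well. SubVP main pipes then get their buffering
 * prepared (FORCE_ONE_ROW_FOR_FRAME / CURSOR_REQ_MODE).
 */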
3909 void dcn401_program_mall_pipe_config_sequence(struct dc *dc, struct dc_state *context,
3910 		struct block_sequence_state *seq_state)
3911 {
3912 	int i;
3913 	unsigned int num_ways = dcn401_calculate_cab_allocation(dc, context);
3914 	bool cache_cursor = false;
3915 
3916 	// Don't force p-state disallow -- can't block dummy p-state
3917 
3918 	// Update MALL_SEL register for each pipe (break down update_mall_sel call)
3919 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3920 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3921 		struct hubp *hubp = pipe->plane_res.hubp;
3922 
3923 		if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) {
3924 			int cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
3925 
3926 			switch (hubp->curs_attr.color_format) {
3927 			case CURSOR_MODE_MONO:
3928 				cursor_size /= 2;
3929 				break;
3930 			case CURSOR_MODE_COLOR_1BIT_AND:
3931 			case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
3932 			case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
3933 				cursor_size *= 4;
3934 				break;
3935 
3936 			case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
3937 			case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
3938 			default:
3939 				cursor_size *= 8;
3940 				break;
3941 			}
3942 
3943 			if (cursor_size > 16384)
3944 				cache_cursor = true;
3945 
3946 			if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
3947 				hwss_add_hubp_update_mall_sel(seq_state, hubp, 1, false);
3948 			} else {
3949 				// MALL not supported with Stereo3D
3950 				uint32_t mall_sel = (num_ways <= dc->caps.cache_num_ways &&
3951 					pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
3952 					pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO &&
3953 					!pipe->plane_state->address.tmz_surface) ? 2 : 0;
3954 				hwss_add_hubp_update_mall_sel(seq_state, hubp, mall_sel, cache_cursor);
3955 			}
3956 		}
3957 	}
3958 
3959 	// Program FORCE_ONE_ROW_FOR_FRAME and CURSOR_REQ_MODE for main subvp pipes
3960 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3961 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3962 		struct hubp *hubp = pipe->plane_res.hubp;
3963 
3964 		if (pipe->stream && hubp && hubp->funcs->hubp_prepare_subvp_buffering) {
3965 			if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
3966 				hwss_add_hubp_prepare_subvp_buffering(seq_state, hubp, true);
3967 		}
3968 	}
3969 }
3970 
3971 void dcn401_verify_allow_pstate_change_high_sequence(struct dc *dc,
3972 		struct block_sequence_state *seq_state)
3973 {
3974 	struct hubbub *hubbub = dc->res_pool->hubbub;
3975 
3976 	if (!hubbub->funcs->verify_allow_pstate_change_high)
3977 		return;
3978 
3979 	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
3980 		/* Attempt hardware workaround force recovery */
3981 		dcn401_hw_wa_force_recovery_sequence(dc, seq_state);
3982 	}
3983 }
3984 
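/* Queue the soft reset recovery workaround: blank every HUBP, pulse
 * DCHUBBUB_GLOBAL_SOFT_RESET with HUBP_DISABLE toggled around it, then
 * unblank. Returns false when recovery is disabled in the debug options.
 */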
3985 bool dcn401_hw_wa_force_recovery_sequence(struct dc *dc,
3986 		struct block_sequence_state *seq_state)
3987 {
3988 	struct hubp *hubp;
3989 	unsigned int i;
3990 
3991 	if (!dc->debug.recovery_enabled)
3992 		return false;
3993 
3994 	/* Step 1: Set HUBP_BLANK_EN=1 for all active pipes */
3995 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3996 		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
3997 
3998 		if (pipe_ctx != NULL) {
3999 			hubp = pipe_ctx->plane_res.hubp;
4000 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
4001 				hwss_add_hubp_set_blank_en(seq_state, hubp, true);
4002 		}
4003 	}
4004 
4005 	/* Step 2: DCHUBBUB_GLOBAL_SOFT_RESET=1 */
4006 	hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, true);
4007 
4008 	/* Step 3: Set HUBP_DISABLE=1 for all active pipes */
4009 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
4010 		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
4011 
4012 		if (pipe_ctx != NULL) {
4013 			hubp = pipe_ctx->plane_res.hubp;
4014 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
4015 				hwss_add_hubp_disable_control(seq_state, hubp, true);
4016 		}
4017 	}
4018 
4019 	/* Step 4: Set HUBP_DISABLE=0 for all active pipes */
4020 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
4021 		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
4022 
4023 		if (pipe_ctx != NULL) {
4024 			hubp = pipe_ctx->plane_res.hubp;
4025 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
4026 				hwss_add_hubp_disable_control(seq_state, hubp, false);
4027 		}
4028 	}
4029 
4030 	/* Step 5: DCHUBBUB_GLOBAL_SOFT_RESET=0 */
4031 	hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, false);
4032 
4033 	/* Step 6: Set HUBP_BLANK_EN=0 for all active pipes */
4034 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
4035 		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
4036 
4037 		if (pipe_ctx != NULL) {
4038 			hubp = pipe_ctx->plane_res.hubp;
4039 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
4040 				hwss_add_hubp_set_blank_en(seq_state, hubp, false);
4041 		}
4042 	}
4043 
4044 	return true;
4045 }
4046